from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-wav2vec2-large-en-de": (
        "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}


class Speech2Text2Config(PretrainedConfig):
    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10_000,
        decoder_layers=6,
        decoder_ffn_dim=2_048,
        decoder_attention_heads=4,
        decoder_layerdrop=0.0,
        use_cache=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_target_positions=1_024,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
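

# Hedged usage sketch (not part of the original file): `attribute_map` above
# routes the generic `hidden_size` / `num_attention_heads` names onto this
# config's `d_model` / `decoder_attention_heads`, so both spellings agree:
#
#     >>> config = Speech2Text2Config()
#     >>> config.hidden_size == config.d_model == 256
#     True
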
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)

_import_structure = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_trocr"] = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
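

# Hedged aside (not part of the original file): with the `_LazyModule` pattern
# above, importing the package stays cheap and torch-dependent submodules are
# only loaded on first attribute access, e.g.
#
#     >>> import transformers.models.trocr as trocr   # no modeling import yet
#     >>> trocr.TrOCRProcessor                        # triggers the real import
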
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
    import tensorflow as tf

    from transformers import TFViTForImageClassification, TFViTModel


if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class TFViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # Test with an image size that differs from the one specified in the config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        seq_length = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFViTForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # Test with an image size that differs from the one specified in the config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict

@require_tf
class TFViTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification}
        if is_tf_available()
        else {}
    )

    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_graph_mode_with_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFViTModel.from_pretrained("google/vit-base-patch16-224")
        self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.2744, 0.8215, -0.0836])

        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
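

# Hedged aside (not part of the test file): the sequence-length arithmetic the
# tests above rely on. A square input of side `image_size` is cut into
# (image_size // patch_size) ** 2 non-overlapping patches, plus one [CLS] token:
#
#     >>> image_size, patch_size = 30, 2
#     >>> (image_size // patch_size) ** 2 + 1
#     226
#
# Halving the image to 15x15 with interpolate_pos_encoding=True gives
# (15 // 2) ** 2 + 1 = 50 positions, which is what create_and_check_model checks.
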
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
    Speech2Text2Config,
    Speech2Text2ForCausalLM,
    Speech2Text2Tokenizer,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.feature_extractor

    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")

    return proj_weight


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def create_vocab_dict(dict_path):
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]

    num_words = len(words)

    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }

    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict


@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    encoder_config_path,
    decoder_config_path,
    vocab_size,
    num_decoder_layers,
):
    encoder_config = Wav2Vec2Config.from_pretrained(encoder_config_path)
    decoder_config = Speech2Text2Config.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )

    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16_000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=True,
    )

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)
    projection_layer = recursively_load_weights_wav2vec2(model.encoder, hf_encoder)

    hf_decoder = Speech2Text2ForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)

    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())

    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False

    # add projection layer
    hf_wav2vec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wav2vec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)

    vocab_dict = create_vocab_dict(dict_path)

    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)

    tokenizer = Speech2Text2Tokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"

    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument(
        "--encoder_config_path",
        default="facebook/wav2vec2-large-lv60",
        type=str,
        help="Path to hf encoder wav2vec2 checkpoint config",
    )
    parser.add_argument(
        "--decoder_config_path",
        default="facebook/s2t-small-mustc-en-fr-st",
        type=str,
        help="Path to hf decoder s2t checkpoint config",
    )
    parser.add_argument("--vocab_size", default=10_224, type=int, help="Vocab size of decoder")
    parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")

    args = parser.parse_args()
    convert_wav2vec2_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.dict_path,
        encoder_config_path=args.encoder_config_path,
        decoder_config_path=args.decoder_config_path,
        vocab_size=args.vocab_size,
        num_decoder_layers=args.num_decoder_layers,
    )
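

# Hedged usage sketch (not part of the script; the script file name and the
# paths below are placeholders):
#
#     python convert_speech_to_text_wav2vec2_seq2seq_original_to_pytorch.py \
#         --checkpoint_path /path/to/fairseq/checkpoint.pt \
#         --dict_path /path/to/dict.ltr.txt \
#         --pytorch_dump_folder_path ./s2t-wav2vec2-converted
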
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("0.12.2"):
    raise Exception("requires fairseq >= 0.12.2")
if version.parse(fairseq.__version__) > version.parse("2"):
    raise Exception("requires fairseq < v2")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello, World!"
SAMPLE_LANGUAGE = "en_XX"


def convert_xmod_checkpoint_to_pytorch(xmod_checkpoint_path, pytorch_dump_folder_path, classification_head):
    data_dir = Path("data_bin")
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path).parent),
        checkpoint_file=Path(xmod_checkpoint_path).name,
        _name="xmod_base",
        arch="xmod_base",
        task="multilingual_masked_lm",
        data_name_or_path=str(data_dir),
        bpe="sentencepiece",
        sentencepiece_model=str(Path(xmod_checkpoint_path).parent / "sentencepiece.bpe.model"),
        src_dict=str(data_dir / "dict.txt"),
    )
    xmod.eval()  # disable dropout
    print(xmod)

    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=xmod.cfg.model.encoder_embed_dim,
        num_hidden_layers=xmod.cfg.model.encoder_layers,
        num_attention_heads=xmod.cfg.model.encoder_attention_heads,
        intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
        pre_norm=xmod.cfg.model.encoder_normalize_before,
        adapter_reduction_factor=getattr(xmod.cfg.model, "bottleneck", 2),
        adapter_layer_norm=xmod.cfg.model.adapter_layer_norm,
        adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm,
        ln_before_adapter=xmod.cfg.model.ln_before_adapter,
        languages=xmod.cfg.model.languages,
    )
    if classification_head:
        config.num_labels = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]

    print("Our X-MOD config:", config)

    model = XmodForSequenceClassification(config) if classification_head else XmodForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = xmod_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = xmod_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c xmod doesn't use them.

    model.roberta.embeddings.LayerNorm.weight = xmod_sent_encoder.layernorm_embedding.weight
    model.roberta.embeddings.LayerNorm.bias = xmod_sent_encoder.layernorm_embedding.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer = model.roberta.encoder.layer[i]
        xmod_layer = xmod_sent_encoder.layers[i]

        # self attention
        self_attn = layer.attention.self
        if not (
            xmod_layer.self_attn.k_proj.weight.data.shape
            == xmod_layer.self_attn.q_proj.weight.data.shape
            == xmod_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        ):
            raise AssertionError("Dimensions of self-attention weights do not match.")

        self_attn.query.weight.data = xmod_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = xmod_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = xmod_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = xmod_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = xmod_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = xmod_layer.self_attn.v_proj.bias

        # self-attention output
        self_output = layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError("Dimensions of self-attention output weights do not match.")
        self_output.dense.weight = xmod_layer.self_attn.out_proj.weight
        self_output.dense.bias = xmod_layer.self_attn.out_proj.bias
        self_output.LayerNorm.weight = xmod_layer.self_attn_layer_norm.weight
        self_output.LayerNorm.bias = xmod_layer.self_attn_layer_norm.bias

        # intermediate
        intermediate = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError("Dimensions of intermediate weights do not match.")
        intermediate.dense.weight = xmod_layer.fc1.weight
        intermediate.dense.bias = xmod_layer.fc1.bias

        # output
        bert_output = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError("Dimensions of feed-forward weights do not match.")
        bert_output.dense.weight = xmod_layer.fc2.weight
        bert_output.dense.bias = xmod_layer.fc2.bias
        bert_output.LayerNorm.weight = xmod_layer.final_layer_norm.weight
        bert_output.LayerNorm.bias = xmod_layer.final_layer_norm.bias
        if bert_output.adapter_layer_norm is not None:
            bert_output.adapter_layer_norm.weight = xmod_layer.adapter_layer_norm.weight
            bert_output.adapter_layer_norm.bias = xmod_layer.adapter_layer_norm.bias

        if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()):
            raise AssertionError("Lists of language adapters do not match.")
        for lang_code, adapter in xmod_layer.adapter_modules.items():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            to_adapter.fc1.weight = from_adapter.fc1.weight
            to_adapter.fc1.bias = from_adapter.fc1.bias
            to_adapter.fc2.weight = from_adapter.fc2.weight
            to_adapter.fc2.bias = from_adapter.fc2.bias

        # end of layer

    if xmod_sent_encoder.layer_norm is not None:
        model.roberta.encoder.LayerNorm.weight = xmod_sent_encoder.layer_norm.weight
        model.roberta.encoder.LayerNorm.bias = xmod_sent_encoder.layer_norm.bias

    if classification_head:
        model.classifier.dense.weight = xmod.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = xmod.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = xmod.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = xmod.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = xmod.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = xmod.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = xmod.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = xmod.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = xmod.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = xmod.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE)

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = xmod.model.classification_heads["mnli"](xmod.extract_features(input_ids))
    else:
        their_output = xmod.model(input_ids, lang_id=[SAMPLE_LANGUAGE])[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--xmod_checkpoint_path", default=None, type=str, required=True, help="Path to the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--classification_head", action="store_true", help="Whether to convert a final classification head."
    )
    args = parser.parse_args()
    convert_xmod_checkpoint_to_pytorch(
        args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
    )
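

# Hedged usage sketch (not part of the script; the script file name and paths
# below are placeholders):
#
#     python convert_xmod_original_pytorch_checkpoint_to_pytorch.py \
#         --xmod_checkpoint_path /path/to/model.pt \
#         --pytorch_dump_folder_path ./xmod-base-converted
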
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/esm-1b': 'https://huggingface.co/facebook/esm-1b/resolve/main/config.json',
# See all ESM models at https://huggingface.co/models?filter=esm
}


class EsmConfig(PretrainedConfig):
    model_type = "esm"

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1_026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")

    def to_dict(self):
        """Serializes this instance to a Python dictionary, expanding the nested esmfold_config if present."""
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output


@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0

    embed_aa: bool = True
    bypass_lm: bool = False

    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output


@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1_024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        # Note: the state dims must be divisible by the corresponding head widths
        # (the source compared each value against itself, which is always true).
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                f" {self.sequence_state_dim} and {self.sequence_head_width}."
            )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                f" {self.pairwise_state_dim} and {self.pairwise_head_width}."
            )

        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width

        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width`, got"
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}."
            )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width`, got"
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}."
            )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")

        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output


@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)


def get_default_vocab_list():
    return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
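

# Hedged usage sketch (not part of the original file): the TrunkConfig defaults
# satisfy their own __post_init__ validation (1024 / 32 = 32 sequence heads),
# while an over-large dropout is rejected:
#
#     >>> trunk = TrunkConfig()
#     >>> trunk.sequence_state_dim // trunk.sequence_head_width
#     32
#     >>> TrunkConfig(dropout=0.5)
#     Traceback (most recent call last):
#         ...
#     ValueError: `dropout` should not be greater than 0.4, got 0.5.
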
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text

    @unittest.skip("MGP-STR always lower cases letters.")
    def test_added_tokens_do_lower_case(self):
        pass

    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                special_token = "[SPECIAL_TOKEN]"

                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)

    def test_internal_consistency(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, output_text = self.get_input_output_texts(tokenizer)

                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_2 = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_2)

                tokens_2 = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_2), 0)
                text_2 = tokenizer.decode(ids)
                self.assertIsInstance(text_2, str)

                self.assertEqual(text_2.replace(" ", ""), output_text)

    @unittest.skip("MGP-STR tokenizer only handles one sequence.")
    def test_maximum_encoding_length_pair_input(self):
        pass

    @unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer")
    def test_pretokenized_inputs(self):
        pass
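

# Hedged worked example (not part of the test file), derived from the vocab in
# setUp above: "[GO]" and "[s]" take ids 0-1, digits ids 2-11, and letters a-z
# ids 12-37, so "tester" encodes character by character as
#
#     t -> 31, e -> 16, s -> 30, t -> 31, e -> 16, r -> 29
#
# i.e. tokenizer.encode("tester", add_special_tokens=False) would yield
# [31, 16, 30, 31, 16, 29] under that vocab.
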
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("env")
    else:
        parser = argparse.ArgumentParser("Accelerate env command")

    parser.add_argument(
        "--config_file", default=None, help="The config file to use for the default values in the launching script."
    )

    if subparsers is not None:
        parser.set_defaults(func=env_command)
    return parser


def env_command(args):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()

    accelerate_config = "Not found"
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()

    info = {
        "`Accelerate` version": version,
        "Platform": platform.platform(),
        "Python version": platform.python_version(),
        "Numpy version": np.__version__,
        "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
        "PyTorch XPU available": str(pt_xpu_available),
        "PyTorch NPU available": str(pt_npu_available),
        "System RAM": f"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
    }
    if pt_cuda_available:
        info["GPU type"] = torch.cuda.get_device_name()

    print("\nCopy-and-paste the text below in your GitHub issue\n")
    print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]))

    print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:")
    accelerate_config_str = (
        "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else f"\t{accelerate_config}"
    )
    print(accelerate_config_str)

    info["`Accelerate` configs"] = accelerate_config

    return info


def main() -> int:
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
    return 0


if __name__ == "__main__":
    raise SystemExit(main())
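

# Hedged usage note (not part of the file): this module backs the
# `accelerate env` subcommand, which prints the report above for pasting into
# a GitHub issue, e.g.
#
#     accelerate env --config_file ~/.cache/huggingface/accelerate/default_config.yaml
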
from __future__ import annotations

from collections.abc import Callable


class Heap:
    """A generic heap; pass a key function to control the ordering."""

    def __init__(self, key: Callable | None = None) -> None:
        # Stores actual heap items as [item, computed value] pairs.
        self.arr: list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map: dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key = key or (lambda x: x)

    def _parent(self, i: int) -> int | None:
        """Returns parent index of given index if it exists, else None"""
        return int((i - 1) / 2) if i > 0 else None

    def _left(self, i: int) -> int | None:
        """Returns left-child index of given index if it exists, else None"""
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None

    def _right(self, i: int) -> int | None:
        """Returns right-child index of given index if it exists, else None"""
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None

    def _swap(self, i: int, j: int) -> None:
        """Performs the changes required for swapping two elements in the heap"""
        # First update the indexes of the items in the index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]

    def _cmp(self, i: int, j: int) -> bool:
        """Compares the two items using default comparison"""
        return self.arr[i][1] < self.arr[j][1]

    def _get_valid_parent(self, i: int) -> int:
        """
        Returns index of a valid parent as per the desired ordering among the
        given index and both of its children
        """
        left = self._left(i)
        right = self._right(i)
        valid_parent = i

        if left is not None and not self._cmp(left, valid_parent):
            valid_parent = left
        if right is not None and not self._cmp(right, valid_parent):
            valid_parent = right

        return valid_parent

    def _heapify_up(self, index: int) -> None:
        """Fixes the heap in the upward direction from the given index"""
        parent = self._parent(index)
        while parent is not None and not self._cmp(index, parent):
            self._swap(index, parent)
            index, parent = parent, self._parent(parent)

    def _heapify_down(self, index: int) -> None:
        """Fixes the heap in the downward direction from the given index"""
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index, valid_parent)
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)

    def update_item(self, item: int, item_value: int) -> None:
        """Updates the value of the given item in the heap if present"""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)

    def delete_item(self, item: int) -> None:
        """Deletes the given item from the heap if present"""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        # Move the last item into the freed slot and update its index.
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change, so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)

    def insert_item(self, item: int, item_value: int) -> None:
        """Inserts the given item with the given value into the heap"""
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)

    def get_top(self) -> list | None:
        """Returns the top [item, value] pair from the heap if present"""
        return self.arr[0] if self.size else None

    def extract_top(self) -> list | None:
        """Returns and removes the top [item, value] pair from the heap if present"""
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple


def test_heap() -> None:
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
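

# Hedged usage sketch (not part of the file): with the default key this behaves
# as a max-heap on the stored value; pass key=lambda x: -x for min-heap order.
#
#     >>> h = Heap()
#     >>> h.insert_item(5, 34)
#     >>> h.insert_item(6, 31)
#     >>> h.insert_item(7, 37)
#     >>> h.get_top()
#     [7, 37]
#     >>> h.extract_top()
#     [7, 37]
#     >>> h.get_top()
#     [5, 34]
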
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch
    from torch import nn

    from transformers import (
        MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
        MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        MODEL_MAPPING,
        DeiTForImageClassification,
        DeiTForImageClassificationWithTeacher,
        DeiTForMaskedImageModeling,
        DeiTModel,
    )
    from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import DeiTImageProcessor


class DeiTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = DeiTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = DeiTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = DeiTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict

@require_torch
class UpperCamelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Any = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
__SCREAMING_SNAKE_CASE : str = (
{
'feature-extraction': DeiTModel,
'image-classification': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
__SCREAMING_SNAKE_CASE : Dict = False
__SCREAMING_SNAKE_CASE : List[Any] = False
__SCREAMING_SNAKE_CASE : List[Any] = False
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ = DeiTModelTester(self )
lowercase_ = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""DeiT does not use inputs_embeds""" )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
pass
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ = model_class(UpperCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) )
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ = model_class(UpperCamelCase__ )
lowercase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ = [*signature.parameters.keys()]
lowercase_ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCamelCase__ )
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )
def UpperCAmelCase__ ( self : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any]=False ):
'''simple docstring'''
lowercase_ = super()._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
if not self.model_tester.is_training:
return
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(UpperCamelCase__ )
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
lowercase_ = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.train()
lowercase_ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
lowercase_ = model(**UpperCamelCase__ ).loss
loss.backward()
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
lowercase_ = False
lowercase_ = True
for model_class in self.all_model_classes:
if model_class in get_values(UpperCamelCase__ ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
lowercase_ = model_class(UpperCamelCase__ )
model.gradient_checkpointing_enable()
model.to(UpperCamelCase__ )
model.train()
lowercase_ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
lowercase_ = model(**UpperCamelCase__ ).loss
loss.backward()
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ = [
{"""title""": """multi_label_classification""", """num_labels""": 2, """dtype""": torch.float},
{"""title""": """single_label_classification""", """num_labels""": 1, """dtype""": torch.long},
{"""title""": """regression""", """num_labels""": 1, """dtype""": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(UpperCamelCase__ ),
*get_values(UpperCamelCase__ ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F'''Testing {model_class} with {problem_type["title"]}''' ):
lowercase_ = problem_type["""title"""]
lowercase_ = problem_type["""num_labels"""]
lowercase_ = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.train()
lowercase_ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
if problem_type["num_labels"] > 1:
lowercase_ = inputs["""labels"""].unsqueeze(1 ).repeat(1 , problem_type["""num_labels"""] )
lowercase_ = inputs["""labels"""].to(problem_type["""dtype"""] )
# This tests that we do not trigger the warning from PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size.", which is a symptom that something is wrong with the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=UpperCamelCase__ ) as warning_list:
lowercase_ = model(**UpperCamelCase__ ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F'''Something is going wrong in the regression problem: intercepted {w.message}''' )
loss.backward()
@slow
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ = DeiTModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
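# Aside: a minimal, self-contained sketch of how `config.problem_type` maps to a
# loss in HF classification heads (regression -> MSELoss, single-label ->
# CrossEntropyLoss, multi-label -> BCEWithLogitsLoss). The tensors below are made
# up for illustration and only mirror the dtypes exercised in the test above.
def _problem_type_loss_demo():
    import torch
    from torch import nn

    logits = torch.randn(2, 3)  # (batch_size, num_labels)
    regression = nn.MSELoss()(logits, torch.randn(2, 3))
    single_label = nn.CrossEntropyLoss()(logits, torch.tensor([0, 2]))
    multi_label = nn.BCEWithLogitsLoss()(logits, torch.randint(0, 2, (2, 3)).float())
    return regression, single_label, multi_label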
def UpperCAmelCase_ ( ):
lowercase_ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class UpperCamelCase__ ( unittest.TestCase ):
@cached_property
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
return (
DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
lowercase_ = DeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" ).to(
UpperCamelCase__ )
lowercase_ = self.default_image_processor
lowercase_ = prepare_img()
lowercase_ = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
lowercase_ = model(**UpperCamelCase__ )
# verify the logits
lowercase_ = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
lowercase_ = torch.tensor([-1.0_266, 0.1_912, -1.2_861] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase_ = DeiTModel.from_pretrained(
"""facebook/deit-base-distilled-patch16-224""" , torch_dtype=torch.floataa , device_map="""auto""" )
lowercase_ = self.default_image_processor
lowercase_ = prepare_img()
lowercase_ = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" )
lowercase_ = inputs.pixel_values.to(UpperCamelCase__ )
# forward pass to make sure inference works in fp16
with torch.no_grad():
lowercase_ = model(UpperCamelCase__ )
| 650
| 1
|
import sys
a = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def UpperCAmelCase_ ( UpperCAmelCase__ ):
lowercase_ = 1
for digit in UpperCAmelCase__:
product *= int(digit )
return product
def UpperCAmelCase_ ( UpperCAmelCase__ = N ):
lowercase_ = -sys.maxsize - 1
lowercase_ = n[:1_3]
lowercase_ = 1_3
while cur_index < len(UpperCAmelCase__ ) - 1_3:
if int(n[cur_index] ) >= int(substr[0] ):
lowercase_ = substr[1:] + n[cur_index]
cur_index += 1
else:
lowercase_ = max(largest_product , str_eval(substr ) )
lowercase_ = n[cur_index : cur_index + 1_3]
cur_index += 1_3
return largest_product
if __name__ == "__main__":
print(F'''{solution() = }''')
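# Cross-check sketch (illustrative): brute-force the same quantity with 4-digit
# windows on a short prefix of the digit string; for "7316717653" the best
# window is 7*6*5*3 = 630.
if __name__ == "__main__":
    from math import prod

    sample = "7316717653"
    print(max(prod(int(d) for d in sample[i : i + 4]) for i in range(len(sample) - 3)))  # 630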
| 650
|
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
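# The try/except above is the standard optional-dependency guard: attempt the
# heavy imports, and on failure re-export dummy objects so `from ... import X`
# still resolves. A minimal standalone sketch of the same idea (the backend
# name is hypothetical):
try:
    import some_optional_backend  # assumption: may not be installed

    _HAS_BACKEND = True
except ImportError:
    _HAS_BACKEND = False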
| 650
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
a = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = ['MLukeTokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
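# For context, a sketch of the lazy-import trick `_LazyModule` relies on
# (PEP 562 module-level `__getattr__`); illustrative only, not the actual
# transformers implementation:
#
#     import importlib
#
#     def __getattr__(name):
#         if name == "MLukeTokenizer":
#             module = importlib.import_module(".tokenization_mluke", __name__)
#             return getattr(module, name)
#         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")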
| 650
|
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
a = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class UpperCamelCase__ ( __magic_name__ , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[Any] = XGLMTokenizer
__SCREAMING_SNAKE_CASE : List[Any] = XGLMTokenizerFast
__SCREAMING_SNAKE_CASE : List[Any] = True
__SCREAMING_SNAKE_CASE : int = True
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
lowercase_ = XGLMTokenizer(UpperCamelCase__ , keep_accents=UpperCamelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
lowercase_ = """<pad>"""
lowercase_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase__ ) , UpperCamelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase__ ) , UpperCamelCase__ )
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
lowercase_ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(len(UpperCamelCase__ ) , 1_008 )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1_008 )
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
lowercase_ = XGLMTokenizer(UpperCamelCase__ , keep_accents=UpperCamelCase__ )
lowercase_ = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(UpperCamelCase__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
lowercase_ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
UpperCamelCase__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
lowercase_ = tokenizer.convert_tokens_to_ids(UpperCamelCase__ )
self.assertListEqual(
UpperCamelCase__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
lowercase_ = tokenizer.convert_ids_to_tokens(UpperCamelCase__ )
self.assertListEqual(
UpperCamelCase__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
return XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(UpperCamelCase__ , f.name )
lowercase_ = XGLMTokenizer(f.name , keep_accents=UpperCamelCase__ )
lowercase_ = pickle.dumps(UpperCamelCase__ )
pickle.loads(UpperCamelCase__ )
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
lowercase_ = self.get_tokenizer()
lowercase_ = self.get_rust_tokenizer()
lowercase_ = """I was born in 92000, and this is falsé."""
lowercase_ = tokenizer.tokenize(UpperCamelCase__ )
lowercase_ = rust_tokenizer.tokenize(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
lowercase_ = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
lowercase_ = rust_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
lowercase_ = self.get_rust_tokenizer()
lowercase_ = tokenizer.encode(UpperCamelCase__ )
lowercase_ = rust_tokenizer.encode(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
@slow
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ = """Hello World!"""
lowercase_ = [2, 31_227, 4_447, 35]
self.assertListEqual(UpperCamelCase__ , self.big_tokenizer.encode(UpperCamelCase__ ) )
@slow
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
lowercase_ = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"""
)
# fmt: off
lowercase_ = [2, 1_018, 67, 11, 1_988, 2_617, 5_631, 278, 11, 3_407, 48, 71_630, 28_085, 4, 3_234, 157, 13, 6, 5, 6, 4, 3_526, 768, 15, 659, 57, 298, 3_983, 864, 129, 21, 6, 5, 13_675, 377, 652, 7_580, 10_341, 155, 2_817, 422, 1_666, 7, 1_674, 53, 113, 202_277, 17_892, 33, 60, 87, 4, 3_234, 157, 61, 2_667, 52_376, 19, 88, 23, 735]
# fmt: on
self.assertListEqual(UpperCamelCase__ , self.big_tokenizer.encode(UpperCamelCase__ ) )
@slow
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
# fmt: off
lowercase_ = {
"""input_ids""": [[2, 108_825, 1_163, 15, 88_010, 473, 15_898, 157, 13_672, 1_857, 312, 8, 238_021, 1_163, 53, 13_672, 1_857, 312, 8, 53_283, 182_396, 8, 18_566, 16, 36_733, 4_101, 8, 230, 244_017, 122_553, 7, 15, 132_597, 4, 293, 12_511, 7_610, 4, 3_414, 132_597, 9, 4, 32_361, 362, 4, 734, 28_512, 32_569, 18, 4, 32_361, 26_096, 14_982, 73, 18_715, 21_433, 235_261, 15, 492, 12_427, 16, 53, 18_715, 21_433, 65_454, 15, 23_659, 563, 16, 278, 597, 2_843, 595, 7_931, 182_396, 64_186, 22, 886, 595, 132_981, 53, 25_540, 3_449, 43_982, 39_901, 5_951, 878, 330, 4, 27_694, 80_269, 312, 53, 6_517, 11_780, 611, 20_408, 5], [2, 6, 132_597, 67, 42_897, 33, 592, 8, 163_729, 25_540, 361, 136_997, 109_514, 173_230, 7, 501, 60, 102_913, 196, 5_631, 235, 63_243, 473, 6, 231_757, 74, 5_277, 7_905, 53, 3_095, 37_317, 22, 454, 183_874, 5], [2, 268, 31_298, 46_530, 6, 132_935, 43_831, 7, 597, 32, 24, 3_688, 9_865, 5]],
"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase__ , model_name="""facebook/xglm-564M""" , padding=UpperCamelCase__ , )
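# Usage sketch mirroring the assertions above: the slow and fast tokenizers
# should agree on ids, and decode should invert encode. Needs network access to
# fetch "facebook/xglm-564M", so it is kept outside the test class.
def _xglm_roundtrip_demo():
    slow = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
    fast = XGLMTokenizerFast.from_pretrained("facebook/xglm-564M")
    text = "Hello World!"
    assert slow.encode(text) == fast.encode(text)
    return slow.decode(slow.encode(text))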
| 650
| 1
|
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def UpperCAmelCase_ ( ):
raise RuntimeError("""CUDA out of memory.""" )
class UpperCamelCase__ ( nn.Module ):
def __init__( self : str ):
'''simple docstring'''
super().__init__()
lowercase_ = nn.Linear(3 , 4 )
lowercase_ = nn.BatchNormad(4 )
lowercase_ = nn.Linear(4 , 5 )
def UpperCAmelCase__ ( self : Dict , UpperCamelCase__ : Any ):
'''simple docstring'''
return self.lineara(self.batchnorm(self.lineara(UpperCamelCase__ ) ) )
class UpperCamelCase__ ( unittest.TestCase ):
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
lowercase_ = []
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(UpperCamelCase__ : Optional[Any] ):
nonlocal batch_sizes
batch_sizes.append(UpperCamelCase__ )
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(UpperCamelCase__ , [128, 64, 32, 16, 8] )
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ = []
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(UpperCamelCase__ : int , UpperCamelCase__ : Tuple ):
nonlocal batch_sizes
batch_sizes.append(UpperCamelCase__ )
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
lowercase_ , lowercase_ = mock_training_loop_function("""hello""" )
self.assertListEqual(UpperCamelCase__ , [128, 64, 32, 16, 8] )
self.assertListEqual([bs, arga] , [8, """hello"""] )
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
@find_executable_batch_size(starting_batch_size=0 )
def mock_training_loop_function(UpperCamelCase__ : List[str] ):
pass
with self.assertRaises(UpperCamelCase__ ) as cm:
mock_training_loop_function()
self.assertIn("""No executable batch size found, reached zero.""" , cm.exception.args[0] )
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(UpperCamelCase__ : List[Any] ):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(UpperCamelCase__ ) as cm:
mock_training_loop_function()
self.assertIn("""No executable batch size found, reached zero.""" , cm.exception.args[0] )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any , UpperCamelCase__ : int ):
if batch_size != 8:
raise_fake_out_of_memory()
with self.assertRaises(UpperCamelCase__ ) as cm:
mock_training_loop_function(128 , """hello""" , """world""" )
self.assertIn("""Batch size was passed into `f`""" , cm.exception.args[0] )
self.assertIn("""`f(arg1='hello', arg2='world')""" , cm.exception.args[0] )
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(UpperCamelCase__ : Any ):
raise ValueError("""Oops, we had an error!""" )
with self.assertRaises(UpperCamelCase__ ) as cm:
mock_training_loop_function()
self.assertIn("""Oops, we had an error!""" , cm.exception.args[0] )
@require_cuda
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ = torch.cuda.memory_allocated()
lowercase_ = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated() , UpperCamelCase__ )
lowercase_ = release_memory(UpperCamelCase__ )
self.assertEqual(torch.cuda.memory_allocated() , UpperCamelCase__ )
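# Typical real-world use of `find_executable_batch_size` (sketch): the wrapped
# function takes the batch size as its first argument, the caller omits it, and
# the utility retries with a halved batch size whenever it catches an OOM error.
# `build_dataloader` and `train_one_epoch` are hypothetical placeholders.
@find_executable_batch_size(starting_batch_size=256)
def _training_run(batch_size, build_dataloader, train_one_epoch):
    dataloader = build_dataloader(batch_size)
    return train_one_epoch(dataloader)
# called as `_training_run(build_dataloader, train_one_epoch)`, without batch_size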
| 650
|
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
a = None
a = logging.get_logger(__name__)
a = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
a = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
},
'tokenizer_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/tokenizer.json',
't5-base': 'https://huggingface.co/t5-base/resolve/main/tokenizer.json',
't5-large': 'https://huggingface.co/t5-large/resolve/main/tokenizer.json',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/tokenizer.json',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/tokenizer.json',
},
}
# TODO(PVP) - this should be removed in Transformers v5
a = {
't5-small': 5_1_2,
't5-base': 5_1_2,
't5-large': 5_1_2,
't5-3b': 5_1_2,
't5-11b': 5_1_2,
}
class UpperCamelCase__ ( __magic_name__ ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE : int = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE : str = ['input_ids', 'attention_mask']
__SCREAMING_SNAKE_CASE : Dict = TaTokenizer
__SCREAMING_SNAKE_CASE : List[int] = []
def __init__( self : int , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : Any=None , UpperCamelCase__ : Dict="</s>" , UpperCamelCase__ : Tuple="<unk>" , UpperCamelCase__ : Optional[Any]="<pad>" , UpperCamelCase__ : Union[str, Any]=100 , UpperCamelCase__ : Optional[Any]=None , **UpperCamelCase__ : List[str] , ):
'''simple docstring'''
if extra_ids > 0 and additional_special_tokens is None:
lowercase_ = [F'''<extra_id_{i}>''' for i in range(UpperCamelCase__ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
lowercase_ = len(set(filter(lambda UpperCamelCase__ : bool("""extra_id_""" in str(UpperCamelCase__ ) ) , UpperCamelCase__ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
""" provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"""
""" tokens""" )
super().__init__(
UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , extra_ids=UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , **UpperCamelCase__ , )
lowercase_ = vocab_file
lowercase_ = False if not self.vocab_file else True
lowercase_ = extra_ids
@staticmethod
def UpperCAmelCase__ ( UpperCamelCase__ : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : int ):
'''simple docstring'''
if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
lowercase_ = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
"""This tokenizer was incorrectly instantiated with a model max length of"""
F''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
""" behavior is kept to avoid breaking backwards compatibility when padding/encoding with"""
""" `truncation is True`.\n- Be aware that you SHOULD NOT rely on"""
F''' {pretrained_model_name_or_path} automatically truncating your input to'''
F''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
F''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
""" `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"""
""" instantiate this tokenizer with `model_max_length` set to your preferred value.""" , UpperCamelCase__ , )
return max_model_length
def UpperCAmelCase__ ( self : int , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(UpperCamelCase__ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowercase_ = os.path.join(
UpperCamelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ):
copyfile(self.vocab_file , UpperCamelCase__ )
logger.info(F'''Copy vocab file to {out_vocab_file}''' )
return (out_vocab_file,)
def UpperCAmelCase__ ( self : int , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
lowercase_ = token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return self.prefix_tokens + token_ids_a
else:
lowercase_ = token_ids_a + [self.eos_token_id]
return self.prefix_tokens + token_ids_a + token_ids_a
def UpperCAmelCase__ ( self : str , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
lowercase_ = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
return list(
set(filter(lambda UpperCamelCase__ : bool(re.search(R"""<extra_id_\d+>""" , UpperCamelCase__ ) ) is not None , self.additional_special_tokens ) ) )
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
return [self.convert_tokens_to_ids(UpperCamelCase__ ) for token in self.get_sentinel_tokens()]
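# Illustration of the sentinel helpers defined above on a stock checkpoint
# (needs network access; the concrete id assumes t5-small's default 100 extra
# ids, where "<extra_id_0>" occupies the last vocab slot):
def _sentinel_demo():
    tok = TaTokenizerFast.from_pretrained("t5-small")
    print(tok.convert_tokens_to_ids("<extra_id_0>"))  # 32099 for t5-small
    print(tok.get_sentinel_tokens()[:3])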
| 650
| 1
|
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
a = re.compile(R'\s+')
def UpperCAmelCase_ ( UpperCAmelCase__ ):
return {"hash": hashlib.mda(re.sub(SCREAMING_SNAKE_CASE_ , """""" , example["""content"""] ).encode("""utf-8""" ) ).hexdigest()}
def UpperCAmelCase_ ( UpperCAmelCase__ ):
lowercase_ = [len(SCREAMING_SNAKE_CASE_ ) for line in example["""content"""].splitlines()]
return {"line_mean": np.mean(SCREAMING_SNAKE_CASE_ ), "line_max": max(SCREAMING_SNAKE_CASE_ )}
def UpperCAmelCase_ ( UpperCAmelCase__ ):
lowercase_ = np.mean([c.isalnum() for c in example["""content"""]] )
return {"alpha_frac": alpha_frac}
def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ ):
if example["hash"] in uniques:
uniques.remove(example["""hash"""] )
return True
else:
return False
def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__=5 ):
lowercase_ = ["""auto-generated""", """autogenerated""", """automatically generated"""]
lowercase_ = example["""content"""].splitlines()
for _, line in zip(range(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__=5 , UpperCAmelCase__=0.05 ):
lowercase_ = ["""unit tests""", """test file""", """configuration file"""]
lowercase_ = example["""content"""].splitlines()
lowercase_ = 0
lowercase_ = 0
# first test
for _, line in zip(range(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
lowercase_ = example["""content"""].count("""\n""" )
lowercase_ = int(coeff * nlines )
for line in lines:
count_config += line.lower().count("""config""" )
count_test += line.lower().count("""test""" )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def UpperCAmelCase_ ( UpperCAmelCase__ ):
lowercase_ = ["""def """, """class """, """for """, """while """]
lowercase_ = example["""content"""].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__=4 ):
lowercase_ = example["""content"""].splitlines()
lowercase_ = 0
for line in lines:
counter += line.lower().count("""=""" )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def UpperCAmelCase_ ( UpperCAmelCase__ ):
lowercase_ = tokenizer(example["""content"""] , truncation=SCREAMING_SNAKE_CASE_ )["""input_ids"""]
lowercase_ = len(example["""content"""] ) / len(SCREAMING_SNAKE_CASE_ )
return {"ratio": ratio}
def UpperCAmelCase_ ( UpperCAmelCase__ ):
lowercase_ = {}
results.update(get_hash(SCREAMING_SNAKE_CASE_ ) )
results.update(line_stats(SCREAMING_SNAKE_CASE_ ) )
results.update(alpha_stats(SCREAMING_SNAKE_CASE_ ) )
results.update(char_token_ratio(SCREAMING_SNAKE_CASE_ ) )
results.update(is_autogenerated(SCREAMING_SNAKE_CASE_ ) )
results.update(is_config_or_test(SCREAMING_SNAKE_CASE_ ) )
results.update(has_no_keywords(SCREAMING_SNAKE_CASE_ ) )
results.update(has_few_assignments(SCREAMING_SNAKE_CASE_ ) )
return results
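# Toy illustration of the line statistics computed above (no dataset or
# tokenizer required; the hash and token-ratio fields are omitted because they
# need extra state):
def _line_stats_demo():
    example = {"content": "def f():\n    return 42\n"}
    line_lengths = [len(line) for line in example["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}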
def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
if not check_uniques(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def UpperCAmelCase_ ( UpperCAmelCase__ ):
with open(SCREAMING_SNAKE_CASE_ , """rb""" ) as f_in:
with gzip.open(str(SCREAMING_SNAKE_CASE_ ) + """.gz""" , """wb""" , compresslevel=6 ) as f_out:
shutil.copyfileobj(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
os.unlink(SCREAMING_SNAKE_CASE_ )
# Settings
a = HfArgumentParser(PreprocessingArguments)
a = parser.parse_args()
if args.num_workers is None:
a = multiprocessing.cpu_count()
a = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
a = time.time()
a = load_dataset(args.dataset_name, split='train')
print(F'''Time to load dataset: {time.time()-t_start:.2f}''')
# Run preprocessing
a = time.time()
a = ds.map(preprocess, num_proc=args.num_workers)
print(F'''Time to preprocess dataset: {time.time()-t_start:.2f}''')
# Deduplicate hashes
a = set(ds.unique('hash'))
a = len(uniques) / len(ds)
print(F'''Fraction of duplicates: {1-frac:.2%}''')
# Deduplicate data and apply heuristics
a = time.time()
a = ds.filter(filter, fn_kwargs={'uniques': uniques, 'args': args})
print(F'''Time to filter dataset: {time.time()-t_start:.2f}''')
print(F'''Size of filtered dataset: {len(ds_filter)}''')
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
a = time.time()
a = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(F'''Time to deduplicate dataset: {time.time()-t_start:.2f}''')
print(F'''Size of deduplicate dataset: {len(ds_filter)}''')
# Save data in batches of samples_per_file
a = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place to save it
if args.near_deduplication:
with open(output_dir / 'duplicate_clusters.json', 'w') as f:
json.dump(duplicate_clusters, f)
a = output_dir / "data"
data_dir.mkdir(exist_ok=True)
a = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
a = str(data_dir / F'''file-{file_number+1:012}.json''')
a = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(F'''Time to save dataset: {time.time()-t_start:.2f}''')
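# Invocation sketch; the flag names are inferred from the `args.*` attributes
# used above and ultimately defined in `PreprocessingArguments`:
#   python preprocessing.py --dataset_name <hub_dataset> --tokenizer_dir <tokenizer> \
#       --output_dir ./out --num_workers 8 --near_deduplication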
| 700
|
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionDiffEditPipeline
__SCREAMING_SNAKE_CASE : Optional[Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'height', 'width', 'image'} | {'image_latents'}
__SCREAMING_SNAKE_CASE : List[str] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'image'} | {'image_latents'}
__SCREAMING_SNAKE_CASE : int = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__SCREAMING_SNAKE_CASE : Any = frozenset([] )
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=UpperCamelCase__ , )
lowercase_ = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=UpperCamelCase__ , set_alpha_to_one=UpperCamelCase__ , )
lowercase_ = DDIMInverseScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=UpperCamelCase__ , set_alpha_to_zero=UpperCamelCase__ , )
torch.manual_seed(0 )
lowercase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowercase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="""gelu""" , projection_dim=512 , )
lowercase_ = CLIPTextModel(UpperCamelCase__ )
lowercase_ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowercase_ = {
"""unet""": unet,
"""scheduler""": scheduler,
"""inverse_scheduler""": inverse_scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def UpperCAmelCase__ ( self : int , UpperCamelCase__ : Any , UpperCamelCase__ : Any=0 ):
'''simple docstring'''
lowercase_ = floats_tensor((1, 16, 16) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
lowercase_ = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
if str(UpperCamelCase__ ).startswith("""mps""" ):
lowercase_ = torch.manual_seed(UpperCamelCase__ )
else:
lowercase_ = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
lowercase_ = {
"""prompt""": """a dog and a newt""",
"""mask_image""": mask,
"""image_latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 2,
"""inpaint_strength""": 1.0,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def UpperCAmelCase__ ( self : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : str=0 ):
'''simple docstring'''
lowercase_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
lowercase_ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowercase_ = Image.fromarray(np.uinta(UpperCamelCase__ ) ).convert("""RGB""" )
if str(UpperCamelCase__ ).startswith("""mps""" ):
lowercase_ = torch.manual_seed(UpperCamelCase__ )
else:
lowercase_ = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
lowercase_ = {
"""image""": image,
"""source_prompt""": """a cat and a frog""",
"""target_prompt""": """a dog and a newt""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""num_maps_per_mask""": 2,
"""mask_encode_strength""": 1.0,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def UpperCAmelCase__ ( self : List[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple=0 ):
'''simple docstring'''
lowercase_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
lowercase_ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowercase_ = Image.fromarray(np.uinta(UpperCamelCase__ ) ).convert("""RGB""" )
if str(UpperCamelCase__ ).startswith("""mps""" ):
lowercase_ = torch.manual_seed(UpperCamelCase__ )
else:
lowercase_ = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
lowercase_ = {
"""image""": image,
"""prompt""": """a cat and a frog""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""inpaint_strength""": 1.0,
"""guidance_scale""": 6.0,
"""decode_latents""": True,
"""output_type""": """numpy""",
}
return inputs
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
if not hasattr(self.pipeline_class , """_optional_components""" ):
return
lowercase_ = self.get_dummy_components()
lowercase_ = self.pipeline_class(**UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
lowercase_ = self.get_dummy_inputs(UpperCamelCase__ )
lowercase_ = pipe(**UpperCamelCase__ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(UpperCamelCase__ )
lowercase_ = self.pipeline_class.from_pretrained(UpperCamelCase__ )
pipe_loaded.to(UpperCamelCase__ )
pipe_loaded.set_progress_bar_config(disable=UpperCamelCase__ )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(UpperCamelCase__ , UpperCamelCase__ ) is None , F'''`{optional_component}` did not stay set to None after loading.''' , )
lowercase_ = self.get_dummy_inputs(UpperCamelCase__ )
lowercase_ = pipe_loaded(**UpperCamelCase__ )[0]
lowercase_ = np.abs(output - output_loaded ).max()
self.assertLess(UpperCamelCase__ , 1e-4 )
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ = """cpu"""
lowercase_ = self.get_dummy_components()
lowercase_ = self.pipeline_class(**UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowercase_ = self.get_dummy_mask_inputs(UpperCamelCase__ )
lowercase_ = pipe.generate_mask(**UpperCamelCase__ )
lowercase_ = mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16) )
lowercase_ = np.array([0] * 9 )
lowercase_ = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCamelCase__ , 1e-3 )
self.assertEqual(mask[0, -3, -4] , 0 )
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
lowercase_ = """cpu"""
lowercase_ = self.get_dummy_components()
lowercase_ = self.pipeline_class(**UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowercase_ = self.get_dummy_inversion_inputs(UpperCamelCase__ )
lowercase_ = pipe.invert(**UpperCamelCase__ ).images
lowercase_ = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
lowercase_ = np.array(
[0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , )
lowercase_ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCamelCase__ , 1e-3 )
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=5e-3 )
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ = """cpu"""
lowercase_ = self.get_dummy_components()
lowercase_ = {"""beta_start""": 0.00_085, """beta_end""": 0.012, """beta_schedule""": """scaled_linear"""}
lowercase_ = DPMSolverMultistepScheduler(**UpperCamelCase__ )
lowercase_ = DPMSolverMultistepInverseScheduler(**UpperCamelCase__ )
lowercase_ = self.pipeline_class(**UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowercase_ = self.get_dummy_inversion_inputs(UpperCamelCase__ )
lowercase_ = pipe.invert(**UpperCamelCase__ ).images
lowercase_ = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
lowercase_ = np.array(
[0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , )
lowercase_ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCamelCase__ , 1e-3 )
@require_torch_gpu
@slow
class UpperCamelCase__ ( unittest.TestCase ):
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def UpperCAmelCase__ ( cls : Dict ):
'''simple docstring'''
lowercase_ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png""" )
lowercase_ = raw_image.convert("""RGB""" ).resize((768, 768) )
lowercase_ = raw_image
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
lowercase_ = torch.manual_seed(0 )
lowercase_ = StableDiffusionDiffEditPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2-1""" , safety_checker=UpperCamelCase__ , torch_dtype=torch.floataa )
lowercase_ = DDIMScheduler.from_config(pipe.scheduler.config )
lowercase_ = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowercase_ = """a bowl of fruit"""
lowercase_ = """a bowl of pears"""
lowercase_ = pipe.generate_mask(
image=self.raw_image , source_prompt=UpperCamelCase__ , target_prompt=UpperCamelCase__ , generator=UpperCamelCase__ , )
lowercase_ = pipe.invert(
prompt=UpperCamelCase__ , image=self.raw_image , inpaint_strength=0.7 , generator=UpperCamelCase__ ).latents
lowercase_ = pipe(
prompt=UpperCamelCase__ , mask_image=UpperCamelCase__ , image_latents=UpperCamelCase__ , generator=UpperCamelCase__ , negative_prompt=UpperCamelCase__ , inpaint_strength=0.7 , output_type="""numpy""" , ).images[0]
lowercase_ = (
np.array(
load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/diffedit/pears.png""" ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5e-1
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
lowercase_ = torch.manual_seed(0 )
lowercase_ = StableDiffusionDiffEditPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2-1""" , safety_checker=UpperCamelCase__ , torch_dtype=torch.floataa )
lowercase_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
lowercase_ = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowercase_ = """a bowl of fruit"""
lowercase_ = """a bowl of pears"""
lowercase_ = pipe.generate_mask(
image=self.raw_image , source_prompt=UpperCamelCase__ , target_prompt=UpperCamelCase__ , generator=UpperCamelCase__ , )
lowercase_ = pipe.invert(
prompt=UpperCamelCase__ , image=self.raw_image , inpaint_strength=0.7 , generator=UpperCamelCase__ , num_inference_steps=25 , ).latents
lowercase_ = pipe(
prompt=UpperCamelCase__ , mask_image=UpperCamelCase__ , image_latents=UpperCamelCase__ , generator=UpperCamelCase__ , negative_prompt=UpperCamelCase__ , inpaint_strength=0.7 , num_inference_steps=25 , output_type="""numpy""" , ).images[0]
lowercase_ = (
np.array(
load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/diffedit/pears.png""" ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5e-1
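# The two slow tests above follow the same three-stage DiffEdit recipe; in
# outline (argument values mirror the tests):
#   mask_image = pipe.generate_mask(image=raw_image, source_prompt=..., target_prompt=...)
#   inv_latents = pipe.invert(prompt=..., image=raw_image, inpaint_strength=0.7).latents
#   edited = pipe(prompt=..., mask_image=mask_image, image_latents=inv_latents).images[0]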
| 650
| 0
|
import pytest
import datasets
# Import fixture modules as plugins
a = ['tests.fixtures.files', 'tests.fixtures.hub', 'tests.fixtures.fsspec']
def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ ):
# Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
for item in items:
if any(marker in item.keywords for marker in ["""integration""", """unit"""] ):
continue
item.add_marker(pytest.mark.unit )
def UpperCAmelCase_ ( UpperCAmelCase__ ):
config.addinivalue_line("""markers""" , """torchaudio_latest: mark test to run with torchaudio>=0.12""" )
@pytest.fixture(autouse=__A )
def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ ):
# test_hf_cache_home = tmp_path_factory.mktemp("cache")  # TODO: why does a cache dir per test function not work?
lowercase_ = tmp_path_factory.getbasetemp() / """cache"""
lowercase_ = test_hf_cache_home / """datasets"""
lowercase_ = test_hf_cache_home / """metrics"""
lowercase_ = test_hf_cache_home / """modules"""
monkeypatch.setattr("""datasets.config.HF_DATASETS_CACHE""" , str(__A ) )
monkeypatch.setattr("""datasets.config.HF_METRICS_CACHE""" , str(__A ) )
monkeypatch.setattr("""datasets.config.HF_MODULES_CACHE""" , str(__A ) )
lowercase_ = test_hf_datasets_cache / """downloads"""
monkeypatch.setattr("""datasets.config.DOWNLOADED_DATASETS_PATH""" , str(__A ) )
lowercase_ = test_hf_datasets_cache / """downloads""" / """extracted"""
monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_PATH""" , str(__A ) )
@pytest.fixture(autouse=__A , scope="""session""" )
def UpperCAmelCase_ ( ):
datasets.disable_progress_bar()
@pytest.fixture(autouse=__A )
def UpperCAmelCase_ ( UpperCAmelCase__ ):
# don't take tests into account when counting downloads
monkeypatch.setattr("""datasets.config.HF_UPDATE_DOWNLOAD_COUNTS""" , __A )
@pytest.fixture
def UpperCAmelCase_ ( UpperCAmelCase__ ):
# Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
# To be removed once SQLAlchemy 2.0 supported
monkeypatch.setattr("""sqlalchemy.util.deprecations.SILENCE_UBER_WARNING""" , __A )
| 701
|
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
a = logging.get_logger(__name__)
class UpperCamelCase__ ( __magic_name__ ):
__SCREAMING_SNAKE_CASE : str = ['pixel_values']
def __init__( self : List[Any] , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[int, float] = 1 / 255 , UpperCamelCase__ : bool = True , UpperCamelCase__ : int = 8 , **UpperCamelCase__ : Optional[Any] , ):
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
lowercase_ = do_rescale
lowercase_ = rescale_factor
lowercase_ = do_pad
lowercase_ = pad_size
def UpperCAmelCase__ ( self : int , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : float , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
return rescale(UpperCamelCase__ , scale=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def UpperCAmelCase__ ( self : List[Any] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : int , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None ):
'''simple docstring'''
lowercase_ , lowercase_ = get_image_size(UpperCamelCase__ )
lowercase_ = (old_height // size + 1) * size - old_height
lowercase_ = (old_width // size + 1) * size - old_width
return pad(UpperCamelCase__ , ((0, pad_height), (0, pad_width)) , mode="""symmetric""" , data_format=UpperCamelCase__ )
def UpperCAmelCase__ ( self : Optional[int] , UpperCamelCase__ : ImageInput , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[float] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase__ : Dict , ):
'''simple docstring'''
lowercase_ = do_rescale if do_rescale is not None else self.do_rescale
lowercase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase_ = do_pad if do_pad is not None else self.do_pad
lowercase_ = pad_size if pad_size is not None else self.pad_size
lowercase_ = make_list_of_images(UpperCamelCase__ )
if not valid_images(UpperCamelCase__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
# All transformations expect numpy arrays.
lowercase_ = [to_numpy_array(UpperCamelCase__ ) for image in images]
if do_rescale:
lowercase_ = [self.rescale(image=UpperCamelCase__ , scale=UpperCamelCase__ ) for image in images]
if do_pad:
lowercase_ = [self.pad(UpperCamelCase__ , size=UpperCamelCase__ ) for image in images]
lowercase_ = [to_channel_dimension_format(UpperCamelCase__ , UpperCamelCase__ ) for image in images]
lowercase_ = {"""pixel_values""": images}
return BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ )
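# Quick check of the padding arithmetic in `pad` above: with pad_size=8, a
# 10x13 input is padded up to the next multiple of 8 per dimension (16x16).
# Note the formula always adds at least one row/column, so an exact multiple
# (e.g. 16) would still be padded up to 24.
def _pad_amount_demo():
    old_height, old_width, size = 10, 13, 8
    pad_height = (old_height // size + 1) * size - old_height  # 6
    pad_width = (old_width // size + 1) * size - old_width  # 3
    return pad_height, pad_width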
| 650
| 0
|
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class UpperCamelCase__ ( snake_case__ ):
__SCREAMING_SNAKE_CASE : int = (DPMSolverSDEScheduler,)
__SCREAMING_SNAKE_CASE : Optional[Any] = 10
def UpperCAmelCase__ ( self : Any , **UpperCamelCase__ : List[Any] ):
'''simple docstring'''
lowercase_ = {
"""num_train_timesteps""": 1_100,
"""beta_start""": 0.0_001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""noise_sampler_seed""": 0,
}
config.update(**UpperCAmelCase_ )
return config
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
for timesteps in [10, 50, 100, 1_000]:
self.check_over_configs(num_train_timesteps=UpperCAmelCase_ )
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ):
self.check_over_configs(beta_start=UpperCAmelCase_ , beta_end=UpperCAmelCase_ )
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=UpperCAmelCase_ )
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCAmelCase_ )
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ = self.scheduler_classes[0]
lowercase_ = self.get_scheduler_config()
lowercase_ = scheduler_class(**UpperCAmelCase_ )
scheduler.set_timesteps(self.num_inference_steps )
lowercase_ = self.dummy_model()
lowercase_ = self.dummy_sample_deter * scheduler.init_noise_sigma
lowercase_ = sample.to(UpperCAmelCase_ )
for i, t in enumerate(scheduler.timesteps ):
lowercase_ = scheduler.scale_model_input(UpperCAmelCase_ , UpperCAmelCase_ )
lowercase_ = model(UpperCAmelCase_ , UpperCAmelCase_ )
lowercase_ = scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
lowercase_ = output.prev_sample
lowercase_ = torch.sum(torch.abs(UpperCAmelCase_ ) )
lowercase_ = torch.mean(torch.abs(UpperCAmelCase_ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47_821_044_921_875 ) < 1e-2
assert abs(result_mean.item() - 0.2_178_705_964_565_277 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_352_111_816_406 ) < 1e-2
assert abs(result_mean.item() - 0.22_342_906_892_299_652 ) < 1e-3
else:
assert abs(result_sum.item() - 162.52_383_422_851_562 ) < 1e-2
assert abs(result_mean.item() - 0.211_619_570_851_326 ) < 1e-3
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
lowercase_ = self.scheduler_classes[0]
lowercase_ = self.get_scheduler_config(prediction_type="""v_prediction""" )
lowercase_ = scheduler_class(**UpperCAmelCase_ )
scheduler.set_timesteps(self.num_inference_steps )
lowercase_ = self.dummy_model()
lowercase_ = self.dummy_sample_deter * scheduler.init_noise_sigma
lowercase_ = sample.to(UpperCAmelCase_ )
for i, t in enumerate(scheduler.timesteps ):
lowercase_ = scheduler.scale_model_input(UpperCAmelCase_ , UpperCAmelCase_ )
lowercase_ = model(UpperCAmelCase_ , UpperCAmelCase_ )
lowercase_ = scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
lowercase_ = output.prev_sample
lowercase_ = torch.sum(torch.abs(UpperCAmelCase_ ) )
lowercase_ = torch.mean(torch.abs(UpperCAmelCase_ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77_149_200_439_453 ) < 1e-2
assert abs(result_mean.item() - 0.16_226_289_014_816_284 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1_663_360_595_703 ) < 1e-2
assert abs(result_mean.item() - 0.16_688_326_001_167_297 ) < 1e-3
else:
assert abs(result_sum.item() - 119.8_487_548_828_125 ) < 1e-2
assert abs(result_mean.item() - 0.1_560_530_662_536_621 ) < 1e-3
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
lowercase_ = self.scheduler_classes[0]
lowercase_ = self.get_scheduler_config()
lowercase_ = scheduler_class(**UpperCAmelCase_ )
scheduler.set_timesteps(self.num_inference_steps , device=UpperCAmelCase_ )
lowercase_ = self.dummy_model()
lowercase_ = self.dummy_sample_deter.to(UpperCAmelCase_ ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
lowercase_ = scheduler.scale_model_input(UpperCAmelCase_ , UpperCAmelCase_ )
lowercase_ = model(UpperCAmelCase_ , UpperCAmelCase_ )
lowercase_ = scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
lowercase_ = output.prev_sample
lowercase_ = torch.sum(torch.abs(UpperCAmelCase_ ) )
lowercase_ = torch.mean(torch.abs(UpperCAmelCase_ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46_957_397_460_938 ) < 1e-2
assert abs(result_mean.item() - 0.21_805_934_607_982_635 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_353_637_695_312 ) < 1e-2
assert abs(result_mean.item() - 0.22_342_908_382_415_771 ) < 1e-3
else:
assert abs(result_sum.item() - 162.52_383_422_851_562 ) < 1e-2
assert abs(result_mean.item() - 0.211_619_570_851_326 ) < 1e-3
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
lowercase_ = self.scheduler_classes[0]
lowercase_ = self.get_scheduler_config()
lowercase_ = scheduler_class(**UpperCAmelCase_ , use_karras_sigmas=UpperCAmelCase_ )
scheduler.set_timesteps(self.num_inference_steps , device=UpperCAmelCase_ )
lowercase_ = self.dummy_model()
lowercase_ = self.dummy_sample_deter.to(UpperCAmelCase_ ) * scheduler.init_noise_sigma
lowercase_ = sample.to(UpperCAmelCase_ )
for t in scheduler.timesteps:
lowercase_ = scheduler.scale_model_input(UpperCAmelCase_ , UpperCAmelCase_ )
lowercase_ = model(UpperCAmelCase_ , UpperCAmelCase_ )
lowercase_ = scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
lowercase_ = output.prev_sample
lowercase_ = torch.sum(torch.abs(UpperCAmelCase_ ) )
lowercase_ = torch.mean(torch.abs(UpperCAmelCase_ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66_974_135_742_188 ) < 1e-2
assert abs(result_mean.item() - 0.23_003_872_730_981_811 ) < 1e-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63_653_564_453_125 ) < 1e-2
assert abs(result_mean.item() - 0.23_003_872_730_981_811 ) < 1e-2
else:
assert abs(result_sum.item() - 170.3_135_223_388_672 ) < 1e-2
assert abs(result_mean.item() - 0.23_003_872_730_981_811 ) < 1e-2
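# The per-step pattern exercised above is the generic diffusers sampling loop;
# in outline:
#   scheduler.set_timesteps(num_inference_steps)
#   sample = initial_noise * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       model_input = scheduler.scale_model_input(sample, t)
#       noise_pred = model(model_input, t)
#       sample = scheduler.step(noise_pred, t, sample).prev_sample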
| 702
|
def UpperCAmelCase_ ( UpperCAmelCase__ ):
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
raise TypeError("""Input value must be an 'int' type""" )
lowercase_ = 0
while number:
position += 1
number >>= 1
return position
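# Worked example of the loop above, inlined: 32 == 0b100000, so the value is
# shifted six times before reaching zero and the position is 6; an input of 0
# never enters the loop and yields 0.
def _bit_position_demo():
    number, position = 32, 0
    while number:
        position += 1
        number >>= 1
    return position  # 6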
if __name__ == "__main__":
import doctest
doctest.testmod()
| 650
| 0
|
import re
def UpperCAmelCase_ ( UpperCAmelCase__ ):
if len(re.findall("""[ATCG]""" , UpperCAmelCase__ ) ) != len(UpperCAmelCase__ ):
raise ValueError("""Invalid Strand""" )
return dna.translate(dna.maketrans("""ATCG""" , """TAGC""" ) )
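# Standalone example of the same translation table: A<->T and C<->G, so the
# strand "ATCGA" maps to "TAGCT".
def _complement_demo():
    return "ATCGA".translate(str.maketrans("ATCG", "TAGC"))  # "TAGCT"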
if __name__ == "__main__":
import doctest
doctest.testmod()
| 703
|
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UpperCamelCase__ ( __magic_name__ , __magic_name__ , __magic_name__ ):
@register_to_config
def __init__( self : int , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : float , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : bool = False , ):
'''simple docstring'''
super().__init__()
lowercase_ = nn.Embedding(UpperCamelCase__ , UpperCamelCase__ )
lowercase_ = nn.Embedding(UpperCamelCase__ , UpperCamelCase__ )
lowercase_ = False
lowercase_ = nn.Dropout(p=UpperCamelCase__ )
lowercase_ = TaConfig(
vocab_size=UpperCamelCase__ , d_model=UpperCamelCase__ , num_heads=UpperCamelCase__ , d_kv=UpperCamelCase__ , d_ff=UpperCamelCase__ , dropout_rate=UpperCamelCase__ , feed_forward_proj=UpperCamelCase__ , is_decoder=UpperCamelCase__ , is_encoder_decoder=UpperCamelCase__ , )
lowercase_ = nn.ModuleList()
for lyr_num in range(UpperCamelCase__ ):
lowercase_ = TaBlock(UpperCamelCase__ )
self.encoders.append(UpperCamelCase__ )
lowercase_ = TaLayerNorm(UpperCamelCase__ )
lowercase_ = nn.Dropout(p=UpperCamelCase__ )
def UpperCAmelCase__ ( self : Optional[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : str ):
'''simple docstring'''
lowercase_ = self.token_embedder(UpperCamelCase__ )
lowercase_ = encoder_input_tokens.shape[1]
lowercase_ = torch.arange(UpperCamelCase__ , device=encoder_input_tokens.device )
x += self.position_encoding(UpperCamelCase__ )
lowercase_ = self.dropout_pre(UpperCamelCase__ )
# invert the attention mask
lowercase_ = encoder_input_tokens.size()
lowercase_ = self.get_extended_attention_mask(UpperCamelCase__ , UpperCamelCase__ )
for lyr in self.encoders:
lowercase_ = lyr(UpperCamelCase__ , UpperCamelCase__ )[0]
lowercase_ = self.layer_norm(UpperCamelCase__ )
return self.dropout_post(UpperCamelCase__ ), encoder_inputs_mask
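# Miniature of what `get_extended_attention_mask` supplies to the blocks above:
# a (batch, seq) 0/1 mask turned into an additive bias that is 0 at kept
# positions and a very large negative number at masked ones (a sketch, not the
# exact transformers implementation):
def _extended_mask_demo():
    mask = torch.tensor([[1, 1, 0]])
    extended = (1.0 - mask[:, None, None, :].float()) * torch.finfo(torch.float32).min
    return extended.shape  # torch.Size([1, 1, 1, 3])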
| 650
| 0
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
logger = logging.get_logger(__name__)
DPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''Intel/dpt-large''': '''https://huggingface.co/Intel/dpt-large/resolve/main/config.json''',
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class DPTConfig(PretrainedConfig):
    model_type = "dpt"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=384, patch_size=16, num_channels=3, is_hybrid=False, qkv_bias=True, backbone_out_indices=[2, 5, 8, 11], readout_type="project", reassemble_factors=[4, 2, 1, 0.5], neck_hidden_sizes=[96, 192, 384, 768], fusion_hidden_size=256, head_in_index=-1, use_batch_norm_in_fusion_residual=False, use_auxiliary_head=True, auxiliary_loss_weight=0.4, semantic_loss_ignore_index=255, semantic_classifier_dropout=0.1, backbone_featmap_shape=[1, 1_024, 24, 24], neck_ignore_stages=[0, 1], backbone_config=None, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid
        if self.is_hybrid:
            if backbone_config is None:
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = {
                    "global_padding": "same",
                    "layer_type": "bottleneck",
                    "depths": [3, 4, 9],
                    "out_features": ["stage1", "stage2", "stage3"],
                    "embedding_dynamic_padding": True,
                }
                backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, dict):
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, PretrainedConfig):
                backbone_config = backbone_config
            else:
                raise ValueError(
                    F'''backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.''')
            self.backbone_config = backbone_config
            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages
            if readout_type != "project":
                raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode.")
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices
        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']")
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
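# Usage sketch (illustrative): a plain ViT-backbone DPT config versus the hybrid variant.
#
#   config = DPTConfig()                 # pure ViT backbone, readout_type="project"
#   hybrid = DPTConfig(is_hybrid=True)   # builds a default BiT backbone config internally
#   assert hybrid.backbone_config.__class__.__name__ == "BitConfig"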
| 704
|
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar('T')
class LRUCache(Generic[T]):
    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int):
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T):
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)
        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self):
        for k in self.dq_store:
            print(k)

    def __repr__(self):
        return F'''LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}'''
if __name__ == "__main__":
import doctest
doctest.testmod()
a = LRUCache(4)
lru_cache.refer('A')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('A')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 650
| 0
|
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char(cp):
if (
(cp >= 0X4e_00 and cp <= 0X9f_ff)
or (cp >= 0X34_00 and cp <= 0X4d_bf) #
or (cp >= 0X2_00_00 and cp <= 0X2_a6_df) #
or (cp >= 0X2_a7_00 and cp <= 0X2_b7_3f) #
or (cp >= 0X2_b7_40 and cp <= 0X2_b8_1f) #
or (cp >= 0X2_b8_20 and cp <= 0X2_ce_af) #
or (cp >= 0Xf9_00 and cp <= 0Xfa_ff)
or (cp >= 0X2_f8_00 and cp <= 0X2_fa_1f) #
): #
return True
return False
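# Illustrative check (added example): "中" (U+4E2D) falls in the main CJK block, "A" does not.
assert _is_chinese_char(ord("中"))
assert not _is_chinese_char(ord("A"))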
def is_chinese(word):
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def get_chinese_word(tokens):
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens, chinese_word_set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])
    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            span = min(end - start, max_word_len)
            for i in range(span, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
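# Worked example (illustrative): with chinese_word_set = {"天气"}, the tokens
# ["天", "气", "好"] become ["天", "##气", "好"] -- the continuation of a whole
# word gets the WordPiece "##" prefix so it can later be masked as one unit.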
def prepare_ref(lines, ltp_tokenizer, bert_tokenizer):
    ltp_res = []
    for i in range(0, len(lines), 1_0_0):
        res = ltp_tokenizer.seg(lines[i : i + 1_0_0])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)
    bert_res = []
    for i in range(0, len(lines), 1_0_0):
        res = bert_tokenizer(lines[i : i + 1_0_0], add_special_tokens=True, truncation=True, max_length=5_1_2)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save positions of chinese subwords starting with ##, which means they are part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)
    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='prepare_chinese_ref')
parser.add_argument(
'--file_name',
type=str,
default='./resources/chinese-demo.txt',
help='file need process, same as training data in lm',
)
parser.add_argument(
'--ltp', type=str, default='./resources/ltp', help='resources for LTP tokenizer, usually a path'
)
parser.add_argument('--bert', type=str, default='./resources/robert', help='resources for Bert tokenizer')
parser.add_argument('--save_path', type=str, default='./resources/ref.txt', help='path to save res')
    args = parser.parse_args()
main(args)
| 705
|
def UpperCAmelCase_ ( input_str ):
    return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 650
| 0
|
__version__ = '2.13.1'
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('3.7'):
raise ImportWarning(
'To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'
'If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 706
|
def solution(limit=2_8_1_2_3 ):
    sum_divs = [1] * (limit + 1)
    for i in range(2 , int(limit**0.5 ) + 1 ):
        sum_divs[i * i] += i
        for k in range(i + 1 , limit // i + 1 ):
            sum_divs[k * i] += k + i
    abundants = set()
    res = 0
    for n in range(1 , limit + 1 ):
        if sum_divs[n] > n:
            abundants.add(n )
        if not any((n - a in abundants) for a in abundants ):
            res += n
    return res
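# Background (added note): 12 is the smallest abundant number (1+2+3+4+6 = 16 > 12),
# so 24 is the smallest integer expressible as a sum of two abundant numbers;
# solution() sums every n up to the limit that has no such decomposition.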
if __name__ == "__main__":
print(solution())
| 650
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/config.json",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/config.json",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/config.json",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/config.json",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/config.json",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/config.json",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json",
}
class AlbertConfig(PretrainedConfig):
    model_type = 'albert'

    def __init__(self, vocab_size=30_000, embedding_size=128, hidden_size=4_096, num_hidden_layers=12, num_hidden_groups=1, num_attention_heads=64, intermediate_size=16_384, inner_group_num=1, hidden_act="gelu_new", hidden_dropout_prob=0, attention_probs_dropout_prob=0, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, classifier_dropout_prob=0.1, position_embedding_type="absolute", pad_token_id=0, bos_token_id=2, eos_token_id=3, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type


class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self):
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ] )
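# Usage sketch (illustrative): the defaults above correspond to the xxlarge
# architecture, so a smaller variant must override them explicitly, e.g.
#
#   config = AlbertConfig(hidden_size=768, num_attention_heads=12, intermediate_size=3_072)
#   onnx_config = AlbertOnnxConfig(config)
#   list(onnx_config.inputs)  # ["input_ids", "attention_mask", "token_type_ids"]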
| 707
|
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return OpenLlamaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, use_stable_embedding=True)
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def UpperCAmelCase__ ( self : Union[str, Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] , ):
'''simple docstring'''
lowercase_ = True
lowercase_ = OpenLlamaModel(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowercase_ = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , )
lowercase_ = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , )
lowercase_ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase__ ( self : Any , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : str , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Any , UpperCamelCase__ : List[Any] , ):
'''simple docstring'''
lowercase_ = OpenLlamaForCausalLM(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowercase_ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase__ ( self : int , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : List[str] , UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : Tuple , ):
'''simple docstring'''
lowercase_ = True
lowercase_ = True
lowercase_ = OpenLlamaForCausalLM(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
# first forward pass
lowercase_ = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , use_cache=UpperCamelCase__ , )
lowercase_ = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowercase_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowercase_ = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
lowercase_ = torch.cat([input_ids, next_tokens] , dim=-1 )
lowercase_ = torch.cat([input_mask, next_mask] , dim=-1 )
lowercase_ = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , output_hidden_states=UpperCamelCase__ , )["""hidden_states"""][0]
lowercase_ = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , past_key_values=UpperCamelCase__ , output_hidden_states=UpperCamelCase__ , )["""hidden_states"""][0]
# select random slice
lowercase_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowercase_ = output_from_no_past[:, -3:, random_slice_idx].detach()
lowercase_ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3 ) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            'feature-extraction': OpenLlamaModel,
            'text-classification': OpenLlamaForSequenceClassification,
            'text-generation': OpenLlamaForCausalLM,
            'zero-shot': OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_open_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    @unittest.skip("Open-Llama buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)
        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state
        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
| 650
| 0
|
import argparse
import os
import re
PATH_TO_DIFFUSERS = 'src/diffusers'
# Pattern that looks at the indentation in a line.
_re_indent = re.compile(R'^(\s*)\S')
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(R'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(R'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(R'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(R'\[([^\]]+)\]')
def get_indent(line):
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []
    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1
    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))
    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))
    return blocks
def ignore_underscore(key):
    def _inner(x):
        return key(x).lower().replace("_", "")
    return _inner
def sort_objects(objects, key=None):
    def noop(x):
        return x
    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]
    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
def sort_objects_in_import(import_statement):
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return F'''[{imports}]'''
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([F'''"{k}"''' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([F'''"{k}"''' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        return _re_bracket_content.sub(_replace, import_statement)
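# Worked example (illustrative): a one-line entry is rewritten with constants first,
# then classes, then functions, ignoring underscores when comparing:
#
#   sort_objects_in_import('_import_structure["models"] = ["zeta", "Alpha", "BETA"]')
#   # -> '_import_structure["models"] = ["BETA", "Alpha", "zeta"]'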
def sort_imports(file, check_only=True):
    with open(file, "r") as f:
        code = f.read()
    if "_import_structure" not in code:
        return
    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:")
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")
        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue
        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]
        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                sorted_block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(sorted_block)
                count += 1
        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])
    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(F'''Overwriting {file}.''')
            with open(file, "w") as f:
                f.write("\n".join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    failures = []
    for root, _, files in os.walk(PATH_TO_DIFFUSERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(F'''Would overwrite {len(failures)} files, run `make style`.''')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
    args = parser.parse_args()
    sort_imports_in_all_inits(check_only=args.check_only)
| 708
|
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False
logger = logging.get_logger(__name__)
DEFAULT_FONT_PATH = 'ybelkada/fonts'
def _check_torch_version():
    if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
        raise ImportError(
            F'''You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use '''
            "Pix2StructImageProcessor. Please upgrade torch.")
def torch_extract_patches(image_tensor, patch_height, patch_width):
    requires_backends(torch_extract_patches, ["torch"])
    _check_torch_version()
    image_tensor = image_tensor.unsqueeze(0)
    patches = torch.nn.functional.unfold(image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width))
    patches = patches.reshape(image_tensor.size(0), image_tensor.size(1), patch_height, patch_width, -1)
    patches = patches.permute(0, 4, 2, 3, 1).reshape(
        image_tensor.size(2) // patch_height, image_tensor.size(3) // patch_width, image_tensor.size(1) * patch_height * patch_width)
    return patches.unsqueeze(0)
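# Shape example (illustrative): a (3, 32, 32) image tensor with 16x16 patches yields
# a (1, 2, 2, 768) tensor -- a 2x2 grid of flattened 16*16*3 patches.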
def render_text(text, text_size=3_6, text_color="black", background_color="white", left_padding=5, right_padding=5, top_padding=5, bottom_padding=5, font_bytes=None, font_path=None):
    requires_backends(render_text, "vision")
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=8_0)
    lines = wrapper.wrap(text=text)
    wrapped_text = "\n".join(lines)
    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes)
    elif font_path is not None:
        font = font_path
    else:
        font = hf_hub_download(DEFAULT_FONT_PATH, "Arial.TTF")
    font = ImageFont.truetype(font, encoding="UTF-8", size=text_size)
    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new("RGB", (1, 1), background_color))
    _, _, text_width, text_height = temp_draw.textbbox((0, 0), wrapped_text, font)
    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new("RGB", (image_width, image_height), background_color)
    draw = ImageDraw.Draw(image)
    draw.text(xy=(left_padding, top_padding), text=wrapped_text, fill=text_color, font=font)
    return image
def render_header(image, header, **kwargs):
    requires_backends(render_header, "vision")
    # Convert to PIL image if necessary
    image = to_pil_image(image)
    header_image = render_text(header, **kwargs)
    new_width = max(header_image.width, image.width)
    new_height = int(image.height * (new_width / image.width))
    new_header_height = int(header_image.height * (new_width / header_image.width))
    new_image = Image.new("RGB", (new_width, new_height + new_header_height), "white")
    new_image.paste(header_image.resize((new_width, new_header_height)), (0, 0))
    new_image.paste(image.resize((new_width, new_height)), (0, new_header_height))
    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image)
    if infer_channel_dimension_format(new_image) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image, ChannelDimension.LAST)
    return new_image
class Pix2StructImageProcessor(BaseImageProcessor):
    model_input_names = ['flattened_patches']
    def __init__(self, do_convert_rgb: bool = True, do_normalize: bool = True, patch_size: Dict[str, int] = None, max_patches: int = 2_048, is_vqa: bool = False, **kwargs):
        super().__init__(**kwargs)
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = max_patches
        self.is_vqa = is_vqa
    def extract_flattened_patches(self, image: np.ndarray, max_patches: int, patch_size: dict, **kwargs):
        requires_backends(self.extract_flattened_patches, "torch")
        _check_torch_version()
        # convert to torch
        image = to_channel_dimension_format(image, ChannelDimension.FIRST)
        image = torch.from_numpy(image)
        patch_height, patch_width = patch_size["height"], patch_size["width"]
        image_height, image_width = get_image_size(image)
        # maximize scale s.t.
        scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
        num_feasible_rows = max(min(math.floor(scale * image_height / patch_height), max_patches), 1)
        num_feasible_cols = max(min(math.floor(scale * image_width / patch_width), max_patches), 1)
        resized_height = max(num_feasible_rows * patch_height, 1)
        resized_width = max(num_feasible_cols * patch_width, 1)
        image = torch.nn.functional.interpolate(
            image.unsqueeze(0), size=(resized_height, resized_width), mode="bilinear", align_corners=False, antialias=True).squeeze(0)
        # [1, rows, columns, patch_height * patch_width * image_channels]
        patches = torch_extract_patches(image, patch_height, patch_width)
        patches_shape = patches.shape
        rows = patches_shape[1]
        columns = patches_shape[2]
        depth = patches_shape[3]
        # [rows * columns, patch_height * patch_width * image_channels]
        patches = patches.reshape([rows * columns, depth])
        # [rows * columns, 1]
        row_ids = torch.arange(rows).reshape([rows, 1]).repeat(1, columns).reshape([rows * columns, 1])
        col_ids = torch.arange(columns).reshape([1, columns]).repeat(rows, 1).reshape([rows * columns, 1])
        # Offset by 1 so the ids do not contain zeros, which represent padding.
        row_ids += 1
        col_ids += 1
        # Prepare additional patch features.
        # [rows * columns, 1]
        row_ids = row_ids.to(torch.float32)
        col_ids = col_ids.to(torch.float32)
        # [rows * columns, 2 + patch_height * patch_width * image_channels]
        result = torch.cat([row_ids, col_ids, patches], -1)
        # [max_patches, 2 + patch_height * patch_width * image_channels]
        result = torch.nn.functional.pad(result, [0, 0, 0, max_patches - (rows * columns)]).float()
        result = to_numpy_array(result)
        return result
    def normalize(self, image: np.ndarray, data_format=None, **kwargs):
        if image.dtype == np.uint8:
            image = image.astype(np.float32)
        # take mean across the whole `image`
        mean = np.mean(image)
        std = np.std(image)
        adjusted_stddev = max(std, 1.0 / math.sqrt(np.prod(image.shape)))
        return normalize(image, mean=mean, std=adjusted_stddev, **kwargs)
    def preprocess(self, images, header_text=None, do_convert_rgb=None, do_normalize=None, max_patches=None, patch_size=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs):
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        patch_size = patch_size if patch_size is not None else self.patch_size
        max_patches = max_patches if max_patches is not None else self.max_patches
        is_vqa = self.is_vqa
        if kwargs.get("data_format", None) is not None:
            raise ValueError("data_format is not an accepted input as the outputs are ")
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if is_vqa:
            if header_text is None:
                raise ValueError("A header text must be provided for VQA models.")
            font_bytes = kwargs.pop("font_bytes", None)
            font_path = kwargs.pop("font_path", None)
            if isinstance(header_text, str):
                header_text = [header_text] * len(images)
            images = [
                render_header(image, header_text[i], font_bytes=font_bytes, font_path=font_path)
                for i, image in enumerate(images)
            ]
        if do_normalize:
            images = [self.normalize(image=image) for image in images]
        # convert to torch tensor and permute
        images = [
            self.extract_flattened_patches(image=image, max_patches=max_patches, patch_size=patch_size)
            for image in images
        ]
        # create attention mask in numpy
        attention_masks = [(image.sum(axis=-1) != 0).astype(np.float32) for image in images]
        encoded_outputs = BatchFeature(
            data={"flattened_patches": images, "attention_mask": attention_masks}, tensor_type=return_tensors)
        return encoded_outputs
| 650
| 0
|
import collections
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = 'src/transformers'
# Matches is_xxx_available()
_re_backend = re.compile(R'is\_([a-z_]*)_available()')
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(R'^_import_structure\s+=\s+\{([^\}]+)\}')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(R'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(R'^\s*if\s+not\s+is\_[a-z_]*\_available\(\)')
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(R'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(R'^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]')
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(R'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(R'^\s+\[([^\]]+)\]')
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(R'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')
# Catches a line with try:
_re_try = re.compile(R'^\s*try:')
# Catches a line with else:
_re_else = re.compile(R'^\s*else:')
def find_backend(line):
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1
    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None
    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1
    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 1_2 + '"'):
                    objects.append(line[1_3:-3])
                line_index += 1
            import_dict_objects[backend] = objects
        else:
            line_index += 1
    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1
    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 1_2):
                    objects.append(line[1_2:-2])
                line_index += 1
            type_hint_objects[backend] = objects
        else:
            line_index += 1
    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]
    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(F'''Duplicate _import_structure definitions for: {duplicate_imports}''')
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(F'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''')
        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else F'''{key} backend'''
            errors.append(F'''Differences for {name}:''')
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(F'''  {a} in TYPE_HINT but not in _import_structure.''')
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(F'''  {a} in _import_structure but not in TYPE_HINT.''')
    return errors
def check_all_inits():
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = F'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'''
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
'convert_pytorch_checkpoint_to_tf2',
'modeling_flax_pytorch_utils',
'models.esm.openfold_utils',
]
def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(F'''- {module}''' for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            F'''{list_of_modules}\n'''
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.")
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 709
|
import cv2
import numpy as np
class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        # k is an empirically determined constant in [0.04, 0.06]
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self):
        return str(self.k)

    def detect(self, img_path: str):
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list
if __name__ == "__main__":
a = HarrisCorner(0.04, 3)
a , a = edge_detect.detect('path_to_image')
cva.imwrite('detect.png', color_img)
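# Illustrative note (not from the original file): the response computed above is
# the Harris measure R = det(M) - k * trace(M)**2 for the 2x2 structure tensor
# M = [[wxx, wxy], [wxy, wyy]]. With hypothetical window sums wxx=4.0, wyy=3.0,
# wxy=1.0 and k=0.04:
#
#   det = 4.0 * 3.0 - 1.0**2      # 11.0
#   trace = 4.0 + 3.0             # 7.0
#   r = 11.0 - 0.04 * 7.0**2      # 9.04 -> comfortably above the 0.5 threshold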
| 650
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_efficientformer": [
"EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EfficientFormerConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = ["EfficientFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientformer"] = [
"EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"EfficientFormerForImageClassification",
"EfficientFormerForImageClassificationWithTeacher",
"EfficientFormerModel",
"EfficientFormerPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_efficientformer"] = [
"TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFEfficientFormerForImageClassification",
"TFEfficientFormerForImageClassificationWithTeacher",
"TFEfficientFormerModel",
"TFEfficientFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
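# Illustrative note (not part of the original init): with the lazy module set up
# above, heavy submodules are imported only on first attribute access, e.g.:
#
#   from transformers.models.efficientformer import EfficientFormerConfig
#   # configuration_efficientformer is imported at this point, not at package import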
| 710
|
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('9.1.0'):
    PIL_INTERPOLATION = {
'linear': PIL.Image.Resampling.BILINEAR,
'bilinear': PIL.Image.Resampling.BILINEAR,
'bicubic': PIL.Image.Resampling.BICUBIC,
'lanczos': PIL.Image.Resampling.LANCZOS,
'nearest': PIL.Image.Resampling.NEAREST,
}
else:
    PIL_INTERPOLATION = {
'linear': PIL.Image.LINEAR,
'bilinear': PIL.Image.BILINEAR,
'bicubic': PIL.Image.BICUBIC,
'lanczos': PIL.Image.LANCZOS,
'nearest': PIL.Image.NEAREST,
}
def pt_to_pil(images ):
    images = (images / 2 + 0.5).clamp(0 , 1 )
    images = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
    images = numpy_to_pil(images )
    return images


def numpy_to_pil(images ):
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("""uint8""" )
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze() , mode="""L""" ) for image in images]
    else:
        pil_images = [Image.fromarray(image ) for image in images]
    return pil_images
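# Minimal usage sketch (illustrative; assumes torch is available): converting a
# random diffusion-style tensor in [-1, 1] with layout (batch, channel, height,
# width) into PIL images.
#
#   import torch
#   sample = torch.rand(2, 3, 64, 64) * 2 - 1
#   pil_images = pt_to_pil(sample)  # -> list of two 64x64 PIL.Image objects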
| 650
| 0
|
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a , input_b ):
    return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(input_a , input_b ) ) )


def similarity_search(dataset , value_array ):
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            F'''dataset : {dataset.ndim}, value_array : {value_array.ndim}'''
        )
        raise ValueError(msg )
    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                F'''dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}'''
            )
            raise ValueError(msg )
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("""Wrong shape""" )
    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            F'''dataset : {dataset.dtype}, value_array : {value_array.dtype}'''
        )
        raise TypeError(msg )
    answer = []
    for value in value_array:
        dist = euclidean(value , dataset[0] )
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value , dataset_value )
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist] )
    return answer


def cosine_similarity(input_a , input_b ):
    return np.dot(input_a , input_b ) / (norm(input_a ) * norm(input_b ))
if __name__ == "__main__":
import doctest
doctest.testmod()
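# Minimal usage sketch (illustrative values):
#
#   dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
#   value_array = np.array([[0.9, 1.1]])
#   similarity_search(dataset, value_array)  # -> [[[1.0, 1.0], 0.1414...]]
#   cosine_similarity(np.array([1.0, 0.0]), np.array([1.0, 1.0]))  # ~0.7071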
| 711
|
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UnCLIPSchedulerTest( SchedulerCommonTest ):
    scheduler_classes = (UnCLIPScheduler,)

    def get_scheduler_config( self : int , **UpperCamelCase__ : int ):
        '''simple docstring'''
        config = {
"""num_train_timesteps""": 1_000,
"""variance_type""": """fixed_small_log""",
"""clip_sample""": True,
"""clip_sample_range""": 1.0,
"""prediction_type""": """epsilon""",
}
config.update(**UpperCamelCase__ )
return config
    def test_timesteps( self : Tuple ):
        '''simple docstring'''
        for timesteps in [1, 5, 100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def test_variance_type( self : Optional[Any] ):
        '''simple docstring'''
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance )
    def test_clip_sample( self : List[str] ):
        '''simple docstring'''
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample )
    def test_clip_sample_range( self : Any ):
        '''simple docstring'''
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range )
    def test_prediction_type( self : Tuple ):
        '''simple docstring'''
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type )
    def test_time_indices( self : Optional[int] ):
        '''simple docstring'''
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue
                self.check_over_forward(time_step=time_step , prev_timestep=prev_timestep )
    def test_variance_fixed_small_log( self : Union[str, Any] ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="""fixed_small_log""" )
        scheduler = scheduler_class(**scheduler_config )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000e-10 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_549_625 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_994_987 ) ) < 1e-5
    def test_variance_learned_range( self : str ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="""learned_range""" )
        scheduler = scheduler_class(**scheduler_config )
        predicted_variance = 0.5
        assert scheduler._get_variance(1 , predicted_variance=predicted_variance ) - -10.1_712_790 < 1e-5
        assert scheduler._get_variance(487 , predicted_variance=predicted_variance ) - -5.7_998_052 < 1e-5
        assert scheduler._get_variance(999 , predicted_variance=predicted_variance ) - -0.0_010_011 < 1e-5
    def test_full_loop( self : Optional[Any] ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for i, t in enumerate(timesteps ):
            # 1. predict noise residual
            residual = model(sample , t )
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_sum.item() - 252.2_682_495 ) < 1e-2
assert abs(result_mean.item() - 0.3_284_743 ) < 1e-3
    def test_full_loop_skip_timesteps( self : int ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(25 )
        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for i, t in enumerate(timesteps ):
            # 1. predict noise residual
            residual = model(sample , t )
            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual , t , sample , prev_timestep=prev_timestep , generator=generator ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_sum.item() - 258.2_044_983 ) < 1e-2
assert abs(result_mean.item() - 0.3_362_038 ) < 1e-3
    def test_trained_betas( self : List[str] ):
'''simple docstring'''
pass
    def test_add_noise_device( self : int ):
'''simple docstring'''
pass
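# Illustrative sketch (not part of the test suite): the loops above mirror the
# way the scheduler is driven at inference time. Assuming `unet` produces a
# noise prediction for `sample` at timestep `t`:
#
#   scheduler = UnCLIPScheduler(num_train_timesteps=1_000)
#   scheduler.set_timesteps(25)
#   for t in scheduler.timesteps:
#       residual = unet(sample, t)
#       sample = scheduler.step(residual, t, sample).prev_sample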
| 650
| 0
|
class Things:
    def __init__( self : Optional[int] , name : List[Any] , value : Dict , weight : int ):
        '''simple docstring'''
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__( self : Tuple ):
        '''simple docstring'''
        return F'''{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'''

    def get_value( self : Any ):
        '''simple docstring'''
        return self.value

    def get_name( self : List[Any] ):
        '''simple docstring'''
        return self.name

    def get_weight( self : Dict ):
        '''simple docstring'''
        return self.weight

    def value_weight( self : Union[str, Any] ):
        '''simple docstring'''
        return self.value / self.weight


def build_menu(name , value , weight ):
    menu = []
    for i in range(len(value ) ):
        menu.append(Things(name[i] , value[i] , weight[i] ) )
    return menu


def greedy(item , max_cost , key_func ):
    items_copy = sorted(item , key=key_func , reverse=True )
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy ) ):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i] )
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy():
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
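# Minimal usage sketch (illustrative menu):
#
#   food = ["Burger", "Pizza", "Coca Cola"]
#   value = [80, 100, 60]
#   weight = [40, 10, 20]
#   foods = build_menu(food, value, weight)
#   chosen, total_value = greedy(foods, 60, Things.get_value)
#   # greedily keeps the highest-value items whose combined weight stays <= 60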
| 712
|
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt')
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default='cifar10' , metadata={'help': 'Name of a dataset from the datasets package'} )
    dataset_config_name: Optional[str] = field(
        default=None , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
    image_column_name: Optional[str] = field(
        default=None , metadata={'help': 'The column name of the images in the files. If not set, will try to use \'image\' or \'img\'.'} , )
    train_dir: Optional[str] = field(default=None , metadata={'help': 'A folder containing the training data.'} )
    validation_dir: Optional[str] = field(default=None , metadata={'help': 'A folder containing the validation data.'} )
    train_val_split: Optional[float] = field(
        default=0.1_5 , metadata={'help': 'Percent to split off of train for validation.'} )
    mask_patch_size: int = field(default=32 , metadata={'help': 'The size of the square patches to use for masking.'} )
    mask_ratio: float = field(
        default=0.6 , metadata={'help': 'Percentage of patches to mask.'} , )
    max_train_samples: Optional[int] = field(
        default=None , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of training examples to this '
                'value if set.'
            )
        } , )
    max_eval_samples: Optional[int] = field(
        default=None , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
                'value if set.'
            )
        } , )

    def __post_init__( self : Tuple ):
        '''simple docstring'''
        data_files = {}
        if self.train_dir is not None:
            data_files["""train"""] = self.train_dir
        if self.validation_dir is not None:
            data_files["""validation"""] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    model_name_or_path: Optional[str] = field(
        default=None , metadata={
            'help': (
                'The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a '
                'checkpoint identifier on the hub. '
                'Don\'t set if you want to train a model from scratch.'
            )
        } , )
    model_type: Optional[str] = field(
        default=None , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(MODEL_TYPES )} , )
    config_name_or_path: Optional[str] = field(
        default=None , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    config_overrides: Optional[str] = field(
        default=None , metadata={
            'help': (
                'Override some existing default config settings when a model is trained from scratch. Example: '
                'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'
            )
        } , )
    cache_dir: Optional[str] = field(
        default=None , metadata={'help': 'Where do you want to store (cache) the pretrained models/datasets downloaded from the hub'} , )
    model_revision: str = field(
        default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
    image_processor_name: Optional[str] = field(default=None , metadata={'help': 'Name or path of preprocessor config.'} )
    use_auth_token: bool = field(
        default=False , metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        } , )
    image_size: Optional[int] = field(
        default=None , metadata={
            'help': (
                'The size (resolution) of each image. If not specified, will use `image_size` of the configuration.'
            )
        } , )
    patch_size: Optional[int] = field(
        default=None , metadata={
            'help': (
                'The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration.'
            )
        } , )
    encoder_stride: Optional[int] = field(
        default=None , metadata={'help': 'Stride to use for the encoder.'} , )
class MaskGenerator:
    def __init__( self : Dict , input_size : List[Any]=192 , mask_patch_size : Tuple=32 , model_patch_size : Dict=4 , mask_ratio : str=0.6 ):
        '''simple docstring'''
        self.input_size = input_size
        self.mask_patch_size = mask_patch_size
        self.model_patch_size = model_patch_size
        self.mask_ratio = mask_ratio
        if self.input_size % self.mask_patch_size != 0:
            raise ValueError("""Input size must be divisible by mask patch size""" )
        if self.mask_patch_size % self.model_patch_size != 0:
            raise ValueError("""Mask patch size must be divisible by model patch size""" )
        self.rand_size = self.input_size // self.mask_patch_size
        self.scale = self.mask_patch_size // self.model_patch_size
        self.token_count = self.rand_size**2
        self.mask_count = int(np.ceil(self.token_count * self.mask_ratio ) )

    def __call__( self : int ):
        '''simple docstring'''
        mask_idx = np.random.permutation(self.token_count )[: self.mask_count]
        mask = np.zeros(self.token_count , dtype=int )
        mask[mask_idx] = 1
        mask = mask.reshape((self.rand_size, self.rand_size) )
        mask = mask.repeat(self.scale , axis=0 ).repeat(self.scale , axis=1 )
        return torch.tensor(mask.flatten() )
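# Illustrative note (not part of the original script): with the defaults above
# (input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6) the
# generator draws masks on a 6x6 grid (192 // 32), masks ceil(36 * 0.6) = 22 of
# the 36 patches, then upsamples each grid cell by scale = 32 // 4 = 8, giving
# a flat mask of length (6 * 8) ** 2 = 2304 model patches:
#
#   mask = MaskGenerator()()
#   assert mask.shape == (2304,)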
def collate_fn(examples ):
    pixel_values = torch.stack([example["""pixel_values"""] for example in examples] )
    mask = torch.stack([example["""mask"""] for example in examples] )
    return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("""run_mim""" , model_args , data_args )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if """validation""" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split , float ) and data_args.train_val_split > 0.0:
        split = ds["""train"""].train_test_split(data_args.train_val_split )
        ds["""train"""] = split["""train"""]
        ds["""validation"""] = split["""test"""]
# Create config
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        """cache_dir""": model_args.cache_dir,
        """revision""": model_args.model_revision,
        """use_auth_token""": True if model_args.use_auth_token else None,
    }
    if model_args.config_name_or_path:
        config = AutoConfig.from_pretrained(model_args.config_name_or_path , **config_kwargs )
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path , **config_kwargs )
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("""You are instantiating a new config instance from scratch.""" )
        if model_args.config_overrides is not None:
            logger.info(F'''Overriding config: {model_args.config_overrides}''' )
            config.update_from_string(model_args.config_overrides )
            logger.info(F'''New config: {config}''' )
    # make sure the decoder_type is "simmim" (only relevant for BEiT)
    if hasattr(config , """decoder_type""" ):
        config.decoder_type = """simmim"""
    # adapt config
    model_args.image_size = model_args.image_size if model_args.image_size is not None else config.image_size
    model_args.patch_size = model_args.patch_size if model_args.patch_size is not None else config.patch_size
    model_args.encoder_stride = (
        model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
    )
config.update(
{
"""image_size""": model_args.image_size,
"""patch_size""": model_args.patch_size,
"""encoder_stride""": model_args.encoder_stride,
} )
# create image processor
    if model_args.image_processor_name:
        image_processor = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **config_kwargs )
    elif model_args.model_name_or_path:
        image_processor = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **config_kwargs )
    else:
        IMAGE_PROCESSOR_TYPES = {
            conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
        }
        image_processor = IMAGE_PROCESSOR_TYPES[model_args.model_type]()
    # create model
    if model_args.model_name_or_path:
        model = AutoModelForMaskedImageModeling.from_pretrained(
            model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        logger.info("""Training new model from scratch""" )
        model = AutoModelForMaskedImageModeling.from_config(config )
    if training_args.do_train:
        column_names = ds["""train"""].column_names
    else:
        column_names = ds["""validation"""].column_names
    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = """image"""
    elif "img" in column_names:
        image_column_name = """img"""
    else:
        image_column_name = column_names[0]
# transformations as done in original SimMIM paper
# source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("""RGB""" ) if img.mode != "RGB" else img ),
            RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
        ] )
    # create mask generator
    mask_generator = MaskGenerator(
        input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , )

    def preprocess_images(examples ):
        examples["""pixel_values"""] = [transforms(image ) for image in examples[image_column_name]]
        examples["""mask"""] = [mask_generator() for i in range(len(examples[image_column_name] ) )]
        return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError("""--do_train requires a train dataset""" )
        if data_args.max_train_samples is not None:
            ds["""train"""] = ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
        # Set the training transforms
        ds["train"].set_transform(preprocess_images )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError("""--do_eval requires a validation dataset""" )
        if data_args.max_eval_samples is not None:
            ds["""validation"""] = (
                ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images )
# Initialize our trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=ds["""train"""] if training_args.do_train else None , eval_dataset=ds["""validation"""] if training_args.do_eval else None , tokenizer=image_processor , data_collator=collate_fn , )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        trainer.save_model()
        trainer.log_metrics("""train""" , train_result.metrics )
        trainer.save_metrics("""train""" , train_result.metrics )
        trainer.save_state()
# Evaluation
if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("""eval""" , metrics )
        trainer.save_metrics("""eval""" , metrics )
# Write model card and (optionally) push to hub
    kwargs = {
        """finetuned_from""": model_args.model_name_or_path,
        """tasks""": """masked-image-modeling""",
        """dataset""": data_args.dataset_name,
        """tags""": ["""masked-image-modeling"""],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
if __name__ == "__main__":
main()
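# Example invocation (illustrative; every path and flag value is a placeholder):
#
#   python run_mim.py \
#       --model_type vit \
#       --dataset_name cifar10 \
#       --output_dir ./simmim-outputs \
#       --do_train \
#       --do_eval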
| 650
| 0
|
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
    def __init__( self : Optional[Any] ):
        '''simple docstring'''
        self.connections = {}

    def add_node( self : Union[str, Any] , node : str ):
        '''simple docstring'''
        self.connections[node] = {}

    def add_transition_probability( self : Tuple , node1 : str , node2 : str , probability : float ):
        '''simple docstring'''
        if node1 not in self.connections:
            self.add_node(node1 )
        if node2 not in self.connections:
            self.add_node(node2 )
        self.connections[node1][node2] = probability

    def get_nodes( self : Dict ):
        '''simple docstring'''
        return list(self.connections )

    def transition( self : int , node : str ):
        '''simple docstring'''
        current_probability = 0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


def get_transitions(start , transitions , steps ):
    graph = MarkovChainGraphUndirectedUnweighted()
    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1 , node2 , probability )
    visited = Counter(graph.get_nodes() )
    node = start
    for _ in range(steps ):
        node = graph.transition(node )
        visited[node] += 1
    return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
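# Minimal usage sketch (illustrative transition table):
#
#   transitions = [("a", "a", 0.9), ("a", "b", 0.1), ("b", "a", 0.5), ("b", "b", 0.5)]
#   visited = get_transitions("a", transitions, 5_000)
#   # `visited` counts how often the random walk landed on each node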
| 713
|
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
a = logging.get_logger(__name__)
class LevitImageProcessor( BaseImageProcessor ):
    model_input_names = ['pixel_values']

    def __init__( self : List[str] , do_resize : bool = True , size : Dict[str, int] = None , resample : PILImageResampling = PILImageResampling.BICUBIC , do_center_crop : bool = True , crop_size : Dict[str, int] = None , do_rescale : bool = True , rescale_factor : Union[int, float] = 1 / 255 , do_normalize : bool = True , image_mean : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , image_std : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        size = size if size is not None else {"""shortest_edge""": 224}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
        crop_size = get_size_dict(crop_size , param_name="""crop_size""" )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize( self : int , image : np.ndarray , size : Dict[str, int] , resample : PILImageResampling = PILImageResampling.BICUBIC , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        '''simple docstring'''
        size_dict = get_size_dict(size , default_to_square=False )
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size["""shortest_edge"""] )
            output_size = get_resize_output_image_size(image , size=shortest_edge , default_to_square=False )
            size_dict = {"""height""": output_size[0], """width""": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                F'''Size dict must have keys \'height\' and \'width\' or \'shortest_edge\'. Got {size_dict.keys()}''' )
        return resize(
            image , size=(size_dict["""height"""], size_dict["""width"""]) , resample=resample , data_format=data_format , **kwargs )
    def center_crop( self : str , image : np.ndarray , size : Dict[str, int] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        '''simple docstring'''
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F'''Size dict must have keys \'height\' and \'width\'. Got {size.keys()}''' )
        return center_crop(image , size=(size["""height"""], size["""width"""]) , data_format=data_format , **kwargs )

    def rescale( self : Optional[int] , image : np.ndarray , scale : Union[int, float] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        '''simple docstring'''
        return rescale(image , scale=scale , data_format=data_format , **kwargs )

    def normalize( self : List[str] , image : np.ndarray , mean : Union[float, List[float]] , std : Union[float, List[float]] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        '''simple docstring'''
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self : Dict , images : ImageInput , do_resize : Optional[bool] = None , size : Optional[Dict[str, int]] = None , resample : PILImageResampling = None , do_center_crop : Optional[bool] = None , crop_size : Optional[Dict[str, int]] = None , do_rescale : Optional[bool] = None , rescale_factor : Optional[float] = None , do_normalize : Optional[bool] = None , image_mean : Optional[Union[float, Iterable[float]]] = None , image_std : Optional[Union[float, Iterable[float]]] = None , return_tensors : Optional[TensorType] = None , data_format : ChannelDimension = ChannelDimension.FIRST , **kwargs , ):
        '''simple docstring'''
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name="""crop_size""" )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and size is None:
            raise ValueError("""Size must be specified if do_resize is True.""" )
        if do_center_crop and crop_size is None:
            raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image , size , resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image , crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image , rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image , image_mean , image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"""pixel_values""": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
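# Minimal usage sketch (illustrative; the class name above was recovered from
# the LeViT-style resize heuristic and should be treated as an assumption):
#
#   from PIL import Image
#   import numpy as np
#   processor = LevitImageProcessor()
#   image = Image.fromarray(np.zeros((256, 256, 3), dtype=np.uint8))
#   batch = processor.preprocess(images=image, return_tensors="np")
#   batch["pixel_values"].shape  # -> (1, 3, 224, 224)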
| 650
| 0
|
'''simple docstring'''
from heapq import heappop, heappush
import numpy as np
def dijkstra(grid , source , destination , allow_diagonal , ):
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]
    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols) , np.inf )
    matrix[source] = 0
    predecessors = np.empty((rows, cols) , dtype=object )
    predecessors[source] = None
    while queue:
        (dist, (x, y)) = heappop(queue )
        if (x, y) in visited:
            continue
        visited.add((x, y) )
        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y) )
                x, y = predecessors[x, y]
            path.append(source )  # add the source manually
            path.reverse()
            return matrix[destination], path
        for i in range(len(dx ) ):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue , (dist + 1, (nx, ny)) )
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)
    return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
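# Minimal usage sketch (illustrative grid; 1 = passable, 0 = blocked):
#
#   grid = np.array([[1, 1, 1], [0, 1, 0], [1, 1, 1]])
#   dist, path = dijkstra(grid, (0, 0), (2, 2), allow_diagonal=False)
#   # dist == 4.0, path == [(0, 0), (0, 1), (1, 1), (2, 1), (2, 2)]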
| 714
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a = logging.get_logger(__name__)
a = {
"weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}
class RoCBertConfig( PretrainedConfig ):
    model_type = 'roc_bert'

    def __init__( self : Optional[int] , vocab_size : Union[str, Any]=30_522 , hidden_size : int=768 , num_hidden_layers : Any=12 , num_attention_heads : List[str]=12 , intermediate_size : Tuple=3_072 , hidden_act : int="gelu" , hidden_dropout_prob : Any=0.1 , attention_probs_dropout_prob : List[str]=0.1 , max_position_embeddings : List[Any]=512 , type_vocab_size : Union[str, Any]=2 , initializer_range : Tuple=0.02 , layer_norm_eps : List[Any]=1e-12 , use_cache : Optional[int]=True , pad_token_id : List[Any]=0 , position_embedding_type : int="absolute" , classifier_dropout : Optional[Any]=None , enable_pronunciation : List[str]=True , enable_shape : Any=True , pronunciation_embed_dim : Tuple=768 , pronunciation_vocab_size : int=910 , shape_embed_dim : Optional[Any]=512 , shape_vocab_size : List[str]=24_858 , concat_input : Tuple=True , **kwargs : Optional[Any] , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id , **kwargs )
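# Minimal usage sketch (illustrative): instantiating the config with defaults and
# one override.
#
#   config = RoCBertConfig(num_hidden_layers=6)
#   assert config.model_type == 'roc_bert'
#   assert config.pronunciation_vocab_size == 910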
| 715
|
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer , key , value , full_name , weight_type ):
    for attribute in key.split(""".""" ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
        F''' {value.shape} for {full_name}'''
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def recursively_load_weights_wavaveca(fairseq_model , hf_model ):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == """group""" , )
            is_used = True
        elif name.split(""".""" )[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split(""".""" )[-2]
                        mapped_key = mapped_key.replace("""*""" , layer_index )
                    if "weight_g" in name:
                        weight_type = """weight_g"""
                    elif "weight_v" in name:
                        weight_type = """weight_v"""
                    elif "bias" in name:
                        weight_type = """bias"""
                    elif "weight" in name:
                        weight_type = """weight"""
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(F'''Unused weights: {unused_weights}''' )
    return proj_weight
def load_conv_layer(full_name , value , feature_extractor , unused_weights , use_group_norm ):
    name = full_name.split("""conv_layers.""" )[-1]
    items = name.split(""".""" )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
    else:
        unused_weights.append(full_name )
def make_linear_from_emb(emb ):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def create_vocab_dict(dict_path ):
    with open(dict_path , """r""" , encoding="""utf-8""" ) as f:
        lines = f.readlines()
        words = [line.split(""" """ )[0] for line in lines]
    num_words = len(words )
    vocab_dict = {
        """<s>""": 0,
        """<pad>""": 1,
        """</s>""": 2,
        """<unk>""": 3,
    }
    vocab_dict.update(dict(zip(words , range(4 , num_words + 4 ) ) ) )
    return vocab_dict
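# Illustrative note (not part of the original script): a fairseq `dict.txt`
# lists one token per line ("word count"); the mapping built above reserves the
# first four ids for special tokens, e.g. for a two-word dict:
#
#   {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "hello": 4, "world": 5}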
@torch.no_grad()
def convert_wavaveca_checkpoint(checkpoint_path , pytorch_dump_folder_path , dict_path , encoder_config_path , decoder_config_path , vocab_size , num_decoder_layers , ):
    encoder_config = WavaVecaConfig.from_pretrained(encoder_config_path )
    decoder_config = SpeechaTextaConfig.from_pretrained(
        decoder_config_path , vocab_size=vocab_size , decoder_layers=num_decoder_layers , do_stable_layer_norm=True )
    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=True , return_attention_mask=True , )
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
    model = model[0].eval()
    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config )
    projection_layer = recursively_load_weights_wavaveca(model.encoder , hf_encoder )
    hf_decoder = SpeechaTextaForCausalLM(decoder_config )
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=False )
    # set output linear layer
    unexpected_keys.remove("""embed_out""" )
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach() )
    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(F'''The following keys are missing when loading the decoder weights: {missing_keys}''' )
    logger.warning(F'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''' )
    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder , decoder=hf_decoder )
    hf_wavavec.config.tie_word_embeddings = False
    # add projection layer
    hf_wavavec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight )
    hf_wavavec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias )
    vocab_dict = create_vocab_dict(dict_path )
    with open(os.path.join(pytorch_dump_folder_path , """vocab.json""" ) , """w""" ) as fp:
        json.dump(vocab_dict , fp )
    tokenizer = SpeechaTextaTokenizer(os.path.join(pytorch_dump_folder_path , """vocab.json""" ) )
    tokenizer.save_pretrained(pytorch_dump_folder_path )
    config = hf_wavavec.config.to_dict()
    config["""pad_token_id"""] = tokenizer.pad_token_id
    config["""bos_token_id"""] = tokenizer.bos_token_id
    config["""eos_token_id"""] = tokenizer.eos_token_id
    config["""tokenizer_class"""] = """speech_to_text_2"""
    config["""feature_extractor_type"""] = """wav2vec2"""
    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config )
    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
    feature_extractor.save_pretrained(pytorch_dump_folder_path )
feature_extractor.save_pretrained(UpperCAmelCase__ )
if __name__ == "__main__":
a = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument(
'--encoder_config_path',
default='facebook/wav2vec2-large-lv60',
type=str,
help='Path to hf encoder wav2vec2 checkpoint config',
)
parser.add_argument(
'--decoder_config_path',
default='facebook/s2t-small-mustc-en-fr-st',
type=str,
help='Path to hf decoder s2t checkpoint config',
)
parser.add_argument('--vocab_size', default=1_0_2_2_4, type=int, help='Vocab size of decoder')
parser.add_argument('--num_decoder_layers', default=7, type=int, help='Number of decoder layers')
a = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
| 650
| 0
|
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    def __init__( self : Optional[int] , parent : Optional[Any] , batch_size : Tuple=3 , image_size : Optional[int]=32 , num_channels : Any=3 , embeddings_size : List[str]=10 , hidden_sizes : Dict=[10, 20, 30, 40] , depths : Any=[1, 1, 2, 1] , is_training : Any=True , use_labels : Union[str, Any]=True , hidden_act : Any="relu" , num_labels : Union[str, Any]=3 , scope : List[str]=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths )
    def prepare_config_and_inputs( self : int ):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels

    def get_config( self : Dict ):
        '''simple docstring'''
        return ResNetConfig(
            num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )

    def create_and_check_model( self : List[Any] , config : Any , pixel_values : Optional[int] , labels : int ):
        '''simple docstring'''
        model = TFResNetModel(config=config )
        result = model(pixel_values )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )

    def create_and_check_for_image_classification( self : Dict , config : List[Any] , pixel_values : Any , labels : Dict ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config )
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def prepare_config_and_inputs_for_common( self : Dict ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_tf
class TFResNetModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': TFResNetModel, 'image-classification': TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    has_attentions = False

    def setUp( self : str ):
        '''simple docstring'''
        self.model_tester = TFResNetModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ResNetConfig , has_text_modality=False )
    def test_config( self : Tuple ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self : Optional[int] ):
'''simple docstring'''
return
@unittest.skip(reason="""ResNet does not use inputs_embeds""" )
    def test_inputs_embeds( self : int ):
'''simple docstring'''
pass
@unittest.skip(reason="""ResNet does not support input and output embeddings""" )
    def test_model_common_attributes( self : List[str] ):
'''simple docstring'''
pass
    def test_forward_signature( self : List[str] ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )

    def test_model( self : Tuple ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_hidden_states_output( self : Optional[int] ):
        '''simple docstring'''
        def check_hidden_states_output(inputs_dict : int , config : Tuple , model_class : Any ):
            model = model_class(config )
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ['basic', 'bottleneck']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict['output_hidden_states'] = True
                check_hidden_states_output(inputs_dict , config , model_class )
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict , config , model_class )
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
@slow
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def UpperCAmelCase_ ( ):
lowercase_ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class UpperCamelCase__ ( unittest.TestCase ):
@cached_property
    def default_image_processor( self : Optional[int] ):
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""tf""" )
        # forward pass
        outputs = model(**inputs )
        # verify the logits
        expected_shape = tf.TensorShape((1, 1_000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = tf.constant([-11.1_069, -9.7_877, -8.3_777] )
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , expected_slice , atol=1e-4 ) )
| 716
|
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a = logging.get_logger(__name__)
# TODO Update this
a = {
'facebook/esm-1b': 'https://huggingface.co/facebook/esm-1b/resolve/main/config.json',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class UpperCamelCase__ ( __magic_name__ ):
__SCREAMING_SNAKE_CASE : Tuple = 'esm'
    def __init__( self , vocab_size=None , mask_token_id=None , pad_token_id=None , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1_026 , initializer_range=0.02 , layer_norm_eps=1e-12 , position_embedding_type="absolute" , use_cache=True , emb_layer_norm_before=None , token_dropout=False , is_folding_model=False , esmfold_config=None , vocab_list=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , mask_token_id=mask_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("""No esmfold_config supplied for folding model, using default values.""" )
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config , dict ):
                esmfold_config = EsmFoldConfig(**esmfold_config )
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("""No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!""" )
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config , """use_esm_attn_map""" , False ):
            raise ValueError("""The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!""" )
raise ValueError("""The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!""" )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
        output = super().to_dict()
        if isinstance(self.esmfold_config , EsmFoldConfig ):
            output["""esmfold_config"""] = self.esmfold_config.to_dict()
        return output
@dataclass
class UpperCamelCase__ :
    esm_type : str = None
    fp16_esm : bool = True
    use_esm_attn_map : bool = False
    esm_ablate_pairwise : bool = False
    esm_ablate_sequence : bool = False
    esm_input_dropout : float = 0
    embed_aa : bool = True
    bypass_lm : bool = False
    lddt_head_hid_dim : int = 128
    trunk : "TrunkConfig" = None
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk , dict ):
            self.trunk = TrunkConfig(**self.trunk )
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
        output = asdict(self )
        output["""trunk"""] = self.trunk.to_dict()
return output
@dataclass
class UpperCamelCase__ :
    num_blocks : int = 48
    sequence_state_dim : int = 1024
    pairwise_state_dim : int = 128
    sequence_head_width : int = 32
    pairwise_head_width : int = 32
    position_bins : int = 32
    dropout : float = 0
    layer_drop : float = 0
    cpu_grad_checkpoint : bool = False
    max_recycles : int = 4
    chunk_size : Optional[int] = 128
    structure_module : "StructureModuleConfig" = None
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module , dict ):
            self.structure_module = StructureModuleConfig(**self.structure_module )
        if self.max_recycles <= 0:
            raise ValueError(F'''`max_recycles` should be positive, got {self.max_recycles}.''' )
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                """`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"""
                F''' {self.sequence_state_dim} and {self.sequence_head_width}.''' )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                """`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"""
                F''' {self.pairwise_state_dim} and {self.pairwise_head_width}.''' )
        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
"""`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"""
F''' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.''' )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
"""`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"""
F''' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.''' )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(F'''`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.''' )
if self.dropout >= 0.4:
raise ValueError(F'''`dropout` should not be greater than 0.4, got {self.dropout}.''' )
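        # Worked example of the constraints above: with the default sequence_state_dim=1024
        # and sequence_head_width=32 the trunk runs 1024 // 32 = 32 sequence attention heads,
        # and both state dims must divide evenly by their respective head widths.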
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
        output = asdict(self )
        output["""structure_module"""] = self.structure_module.to_dict()
return output
@dataclass
class UpperCamelCase__ :
    sequence_dim : int = 384
    pairwise_dim : int = 128
    ipa_dim : int = 16
    resnet_dim : int = 128
    num_heads_ipa : int = 12
    num_qk_points : int = 4
    num_v_points : int = 8
    dropout_rate : float = 0.1
    num_blocks : int = 8
    num_transition_layers : int = 1
    num_resnet_blocks : int = 2
    num_angles : int = 7
    trans_scale_factor : int = 10
    epsilon : float = 1e-8
    inf : float = 1e5
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
return asdict(self )
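# The tuple below is the fixed 33-token ESM-2 vocabulary: four special tokens, the 20
# standard amino-acid codes, the ambiguous/rare residue codes (X, B, U, Z, O), the gap
# symbols "." and "-", and the <null_1>/<mask> tokens.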
def UpperCAmelCase_ ( ):
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 650
| 0
|
def UpperCAmelCase_ ( number ):
    if not isinstance(number , int ):
        raise TypeError("""Input value must be an 'int' type""" )
    position = 0
    while number:
        position += 1
        number >>= 1
    return position
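# Illustrative behaviour (a sketch, not part of the original module): the function
# returns the 1-based position of the most significant set bit, e.g. 1 -> 1 (0b1),
# 8 -> 4 (0b1000), and 0 -> 0 because the loop never runs for zero.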
if __name__ == "__main__":
import doctest
doctest.testmod()
| 717
|
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser( subparsers=None ):
    if subparsers is not None:
        parser = subparsers.add_parser("""env""" )
    else:
        parser = argparse.ArgumentParser("""Accelerate env command""" )
    parser.add_argument(
        """--config_file""" , default=None , help="""The config file to use for the default values in the launching script.""" )
    if subparsers is not None:
        parser.set_defaults(func=env_command )
    return parser
def env_command( args ):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()
    accelerate_config = """Not found"""
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file ):
        accelerate_config = load_config_from_file(args.config_file ).to_dict()
    info = {
        """`Accelerate` version""": version,
        """Platform""": platform.platform(),
        """Python version""": platform.python_version(),
        """Numpy version""": np.__version__,
        """PyTorch version (GPU?)""": F'''{pt_version} ({pt_cuda_available})''',
        """PyTorch XPU available""": str(pt_xpu_available ),
        """PyTorch NPU available""": str(pt_npu_available ),
        """System RAM""": F'''{psutil.virtual_memory().total / 1_0_2_4 ** 3:.2f} GB''',
    }
    if pt_cuda_available:
        info["""GPU type"""] = torch.cuda.get_device_name()
    print("""\nCopy-and-paste the text below in your GitHub issue\n""" )
    print("""\n""".join([F'''- {prop}: {val}''' for prop, val in info.items()] ) )
    print("""- `Accelerate` default config:""" if args.config_file is None else """- `Accelerate` config passed:""" )
    accelerate_config_str = (
        """\n""".join([F'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] )
        if isinstance(accelerate_config , dict )
        else F'''\t{accelerate_config}'''
    )
    print(accelerate_config_str )
    info["""`Accelerate` configs"""] = accelerate_config
    return info
def main():
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args )
    return 0
if __name__ == "__main__":
raise SystemExit(main())
| 650
| 0
|
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
a = namedtuple(
'_TestCommandArgs',
[
'dataset',
'name',
'cache_dir',
'data_dir',
'all_configs',
'save_infos',
'ignore_verifications',
'force_redownload',
'clear_cache',
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_apercent_close( source , target ):
    return (abs(source - target ) / target) < 0.01
@pytest.mark.integration
def UpperCAmelCase_ ( UpperCAmelCase__ ):
    args = _TestCommandArgs(dataset=UpperCAmelCase__ , all_configs=True , save_infos=True )
    test_command = TestCommand(*args )
    test_command.run()
    dataset_readme_path = os.path.join(UpperCAmelCase__ , """README.md""" )
    assert os.path.exists(dataset_readme_path )
    dataset_infos = DatasetInfosDict.from_directory(UpperCAmelCase__ )
    expected_dataset_infos = DatasetInfosDict(
{
"""default""": DatasetInfo(
features=Features(
{
"""tokens""": Sequence(Value("""string""" ) ),
"""ner_tags""": Sequence(
ClassLabel(names=["""O""", """B-PER""", """I-PER""", """B-ORG""", """I-ORG""", """B-LOC""", """I-LOC"""] ) ),
"""langs""": Sequence(Value("""string""" ) ),
"""spans""": Sequence(Value("""string""" ) ),
} ) , splits=[
{
"""name""": """train""",
"""num_bytes""": 2_3_5_1_5_6_3,
"""num_examples""": 1_0_0_0_0,
},
{
"""name""": """validation""",
"""num_bytes""": 2_3_8_4_1_8,
"""num_examples""": 1_0_0_0,
},
] , download_size=3_9_4_0_6_8_0 , dataset_size=2_5_8_9_9_8_1 , )
} )
    assert dataset_infos.keys() == expected_dataset_infos.keys()
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos["""default"""] , key ), getattr(expected_dataset_infos["""default"""] , key )
        if key == "num_bytes":
            assert is_apercent_close(result , expected )
        elif key == "splits":
            assert list(result ) == list(expected )
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_apercent_close(result[split].num_bytes , expected[split].num_bytes )
        else:
            assert result == expected
| 718
|
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class UpperCamelCase__ :
    def __init__( self , parent , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , num_labels=3 , scope=None , encoder_stride=2 , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
    def UpperCAmelCase__ ( self : Tuple , config , pixel_values , labels ):
        '''simple docstring'''
        model = DeiTModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def UpperCAmelCase__ ( self : Optional[int] , config , pixel_values , labels ):
        '''simple docstring'''
        model = DeiTForMaskedImageModeling(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
        # test greyscale images
        config.num_channels = 1
        model = DeiTForMaskedImageModeling(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
    def UpperCAmelCase__ ( self : Tuple , config , pixel_values , labels ):
        '''simple docstring'''
        config.num_labels = self.type_sequence_label_size
        model = DeiTForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = DeiTForImageClassification(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Any = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
__SCREAMING_SNAKE_CASE : str = (
{
'feature-extraction': DeiTModel,
'image-classification': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
__SCREAMING_SNAKE_CASE : Dict = False
__SCREAMING_SNAKE_CASE : List[Any] = False
__SCREAMING_SNAKE_CASE : List[Any] = False
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ = DeiTModelTester(self )
lowercase_ = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""DeiT does not use inputs_embeds""" )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
pass
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCamelCase__ )
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )
def UpperCAmelCase__ ( self : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any]=False ):
'''simple docstring'''
lowercase_ = super()._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
if not self.model_tester.is_training:
return
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(UpperCamelCase__ )
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
lowercase_ = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.train()
lowercase_ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
lowercase_ = model(**UpperCamelCase__ ).loss
loss.backward()
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
lowercase_ = False
lowercase_ = True
for model_class in self.all_model_classes:
if model_class in get_values(UpperCamelCase__ ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
lowercase_ = model_class(UpperCamelCase__ )
model.gradient_checkpointing_enable()
model.to(UpperCamelCase__ )
model.train()
lowercase_ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
lowercase_ = model(**UpperCamelCase__ ).loss
loss.backward()
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ = [
{"""title""": """multi_label_classification""", """num_labels""": 2, """dtype""": torch.float},
{"""title""": """single_label_classification""", """num_labels""": 1, """dtype""": torch.long},
{"""title""": """regression""", """num_labels""": 1, """dtype""": torch.float},
]
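        # Each entry above exercises a different loss head selected via `config.problem_type`:
        # multi_label_classification uses BCEWithLogitsLoss (float targets), single_label_classification
        # uses CrossEntropyLoss (long targets), and regression uses MSELoss (float targets).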
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(UpperCamelCase__ ),
*get_values(UpperCamelCase__ ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F'''Testing {model_class} with {problem_type["title"]}''' ):
lowercase_ = problem_type["""title"""]
lowercase_ = problem_type["""num_labels"""]
lowercase_ = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.train()
lowercase_ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
if problem_type["num_labels"] > 1:
lowercase_ = inputs["""labels"""].unsqueeze(1 ).repeat(1 , problem_type["""num_labels"""] )
lowercase_ = inputs["""labels"""].to(problem_type["""dtype"""] )
# This tests that we do not trigger the warning form PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=UpperCamelCase__ ) as warning_list:
lowercase_ = model(**UpperCamelCase__ ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F'''Something is going wrong in the regression problem: intercepted {w.message}''' )
loss.backward()
@slow
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
        for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DeiTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def UpperCAmelCase_ ( ):
lowercase_ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class UpperCamelCase__ ( unittest.TestCase ):
@cached_property
    def default_image_processor( self : Optional[int] ):
'''simple docstring'''
return (
DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
        model = DeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" ).to(
            torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-1.0_266, 0.1_912, -1.2_861] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
        model = DeiTModel.from_pretrained(
            """facebook/deit-base-distilled-patch16-224""" , torch_dtype=torch.float16 , device_map="""auto""" )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" )
        pixel_values = inputs.pixel_values.to(torch_device )
        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            outputs = model(pixel_values )
| 650
| 0
|
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module( module ):
    if is_torch_version("""<""" , """2.0.0""" ) or not hasattr(torch , """_dynamo""" ):
        return False
    return isinstance(module , torch._dynamo.eval_frame.OptimizedModule )
def UpperCAmelCase_ ( model , keep_fpaa_wrapper = True ):
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
    is_compiled = is_compiled_module(model )
    if is_compiled:
        compiled_model = model
        model = model._orig_mod
    if is_deepspeed_available():
        options += (DeepSpeedEngine,)
    while isinstance(model , options ):
        model = model.module
    if not keep_fpaa_wrapper:
        forward = getattr(model , """forward""" )
        original_forward = model.__dict__.pop("""_original_forward""" , None )
        if original_forward is not None:
            while hasattr(forward , """__wrapped__""" ):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model , """_converted_to_transformer_engine""" , False ):
            convert_model(model , to_transformer_engine=False )
    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model
    return model
def UpperCAmelCase_ ( ):
PartialState().wait_for_everyone()
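# The helper below saves `obj` in a distributed-safe way: on TPU every process must
# enter xm.save, while on other backends only the main local process writes to disk.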
def UpperCAmelCase_ ( obj , f ):
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj , f )
    elif PartialState().local_process_index == 0:
        torch.save(obj , f )
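# The context manager below temporarily sets upper-cased environment variables from its
# keyword arguments and removes them again on exit. An illustrative sketch of its use:
#   with UpperCAmelCase_(master_port="29501"):
#       ...  # code here sees MASTER_PORT=29501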
@contextmanager
def UpperCAmelCase_ ( **kwargs ):
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value )
    yield
    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]
def UpperCAmelCase_ ( obj ):
    if not hasattr(obj , """__qualname__""" ) and not hasattr(obj , """__name__""" ):
        obj = getattr(obj , """__class__""" , obj )
    if hasattr(obj , """__qualname__""" ):
        return obj.__qualname__
    if hasattr(obj , """__name__""" ):
        return obj.__name__
    return str(obj )
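# `merge_dicts` below performs a recursive deep merge: nested dicts from `source` are
# merged key by key into `destination`, while leaf values simply overwrite.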
def merge_dicts( source , destination ):
    for key, value in source.items():
        if isinstance(value , dict ):
            node = destination.setdefault(key , {} )
            merge_dicts(value , node )
        else:
            destination[key] = value
    return destination
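# The port probe below relies on connect_ex returning 0 when a TCP connection succeeds,
# i.e. when something is already listening on the port; 29500 is the conventional
# default rendezvous port for torch.distributed launches.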
def UpperCAmelCase_ ( port = None ):
    if port is None:
        port = 2_9_5_0_0
    with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
        return s.connect_ex(("""localhost""", port) ) == 0
| 719
|
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 650
| 0
|
def UpperCAmelCase_ ( nums ):
    if not nums: # Makes sure that the list is not empty
        raise ValueError("""List is empty""" )
    average = sum(nums ) / len(nums ) # Calculate the average
    return sum(abs(x - average ) for x in nums ) / len(nums )
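# Worked example: for nums = [1, 2, 3, 4] the average is 2.5, the absolute deviations
# are [1.5, 0.5, 0.5, 1.5], and the mean absolute deviation returned is 1.0.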
if __name__ == "__main__":
import doctest
doctest.testmod()
| 720
|
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
a = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class UpperCamelCase__ ( __magic_name__ , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[Any] = XGLMTokenizer
__SCREAMING_SNAKE_CASE : List[Any] = XGLMTokenizerFast
__SCREAMING_SNAKE_CASE : List[Any] = True
__SCREAMING_SNAKE_CASE : int = True
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(a , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
lowercase_ = """<pad>"""
lowercase_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase__ ) , UpperCamelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase__ ) , UpperCamelCase__ )
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , """<s>""" )
        self.assertEqual(vocab_keys[1] , """<pad>""" )
        self.assertEqual(len(vocab_keys ) , 1_008 )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1_008 )
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
        tokenizer = XGLMTokenizer(a , keep_accents=True )
        tokens = tokenizer.tokenize("""This is a test""" )
        self.assertListEqual(tokens , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
        tokens = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
    def big_tokenizer( self : str ):
'''simple docstring'''
return XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(a , f.name )
            tokenizer = XGLMTokenizer(f.name , keep_accents=True )
            pickled_tokenizer = pickle.dumps(tokenizer )
            pickle.loads(pickled_tokenizer )
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = """I was born in 92000, and this is falsé."""
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
@slow
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ = """Hello World!"""
lowercase_ = [2, 31_227, 4_447, 35]
self.assertListEqual(UpperCamelCase__ , self.big_tokenizer.encode(UpperCamelCase__ ) )
@slow
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
        text = (
            """This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
            """ add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"""
        )
        # fmt: off
        expected_encoding = [2, 1_018, 67, 11, 1_988, 2_617, 5_631, 278, 11, 3_407, 48, 71_630, 28_085, 4, 3_234, 157, 13, 6, 5, 6, 4, 3_526, 768, 15, 659, 57, 298, 3_983, 864, 129, 21, 6, 5, 13_675, 377, 652, 7_580, 10_341, 155, 2_817, 422, 1_666, 7, 1_674, 53, 113, 202_277, 17_892, 33, 60, 87, 4, 3_234, 157, 61, 2_667, 52_376, 19, 88, 23, 735]
        # fmt: on
        self.assertListEqual(expected_encoding , self.big_tokenizer.encode(text ) )
@slow
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
        expected_encoding = {
"""input_ids""": [[2, 108_825, 1_163, 15, 88_010, 473, 15_898, 157, 13_672, 1_857, 312, 8, 238_021, 1_163, 53, 13_672, 1_857, 312, 8, 53_283, 182_396, 8, 18_566, 16, 36_733, 4_101, 8, 230, 244_017, 122_553, 7, 15, 132_597, 4, 293, 12_511, 7_610, 4, 3_414, 132_597, 9, 4, 32_361, 362, 4, 734, 28_512, 32_569, 18, 4, 32_361, 26_096, 14_982, 73, 18_715, 21_433, 235_261, 15, 492, 12_427, 16, 53, 18_715, 21_433, 65_454, 15, 23_659, 563, 16, 278, 597, 2_843, 595, 7_931, 182_396, 64_186, 22, 886, 595, 132_981, 53, 25_540, 3_449, 43_982, 39_901, 5_951, 878, 330, 4, 27_694, 80_269, 312, 53, 6_517, 11_780, 611, 20_408, 5], [2, 6, 132_597, 67, 42_897, 33, 592, 8, 163_729, 25_540, 361, 136_997, 109_514, 173_230, 7, 501, 60, 102_913, 196, 5_631, 235, 63_243, 473, 6, 231_757, 74, 5_277, 7_905, 53, 3_095, 37_317, 22, 454, 183_874, 5], [2, 268, 31_298, 46_530, 6, 132_935, 43_831, 7, 597, 32, 24, 3_688, 9_865, 5]],
"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding , model_name="""facebook/xglm-564M""" , padding=False , )
| 650
| 0
|
'''simple docstring'''
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
a = logging.get_logger(__name__) # pylint: disable=invalid-name
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
@register_to_config
    def __init__( self : Union[str, Any] , learnable : bool , hidden_size : Optional[int] = None , length : Optional[int] = None ):
        '''simple docstring'''
        super().__init__()
        self.learnable = learnable
        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"
            embeddings = torch.zeros(length , hidden_size )
        else:
            embeddings = None
        self.embeddings = torch.nn.Parameter(embeddings )
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = 42
__SCREAMING_SNAKE_CASE : Union[str, Any] = 42
__SCREAMING_SNAKE_CASE : List[Any] = 42
__SCREAMING_SNAKE_CASE : Tuple = 42
__SCREAMING_SNAKE_CASE : Any = 42
__SCREAMING_SNAKE_CASE : List[str] = 42
    def __init__( self : int , vqvae , text_encoder , tokenizer , transformer , scheduler , learned_classifier_free_sampling_embeddings , ):
        '''simple docstring'''
        super().__init__()
        self.register_modules(
            vqvae=vqvae , transformer=transformer , text_encoder=text_encoder , tokenizer=tokenizer , scheduler=scheduler , learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings , )
    def UpperCAmelCase__ ( self : Dict , prompt , num_images_per_prompt , do_classifier_free_guidance ):
        '''simple docstring'''
        batch_size = len(prompt ) if isinstance(prompt , list ) else 1
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
        text_input_ids = text_inputs.input_ids
        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
            logger.warning(
                """The following part of your input was truncated because CLIP can only handle sequences up to"""
                F''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        prompt_embeds = self.text_encoder(text_input_ids.to(self.device ) )[0]
        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=True )
        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0 ).repeat(batch_size , 1 , 1 )
            else:
                uncond_tokens = [""""""] * batch_size
                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens , padding="""max_length""" , max_length=max_length , truncation=True , return_tensors="""pt""" , )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=True )
                # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
                seq_len = negative_prompt_embeds.shape[1]
                negative_prompt_embeds = negative_prompt_embeds.repeat(1 , num_images_per_prompt , 1 )
                negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt , seq_len , -1 )
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds] )
        return prompt_embeds
    @torch.no_grad()
    def __call__( self : str , prompt : Union[str, List[str]] , num_inference_steps : int = 100 , guidance_scale : float = 5.0 , truncation_rate : float = 1.0 , num_images_per_prompt : int = 1 , generator : Optional[torch.Generator] = None , latents : Optional[torch.FloatTensor] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , callback : Optional[Callable] = None , callback_steps : int = 1 , ):
        '''simple docstring'''
        if isinstance(prompt , str ):
            batch_size = 1
        elif isinstance(prompt , list ):
            batch_size = len(prompt )
        else:
            raise ValueError(F'''`prompt` has to be of type `str` or `list` but is {type(prompt )}''' )
        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0
        prompt_embeds = self._encode_prompt(prompt , num_images_per_prompt , do_classifier_free_guidance )
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps , int ) or callback_steps <= 0)
        ):
            raise ValueError(
                F'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
                F''' {type(callback_steps )}.''' )
        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape , mask_class ).to(self.device )
        else:
            if latents.shape != latents_shape:
                raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    """Unexpected latents value(s). All latents must be valid embedding indices i.e. in the range 0,"""
                    F''' {self.transformer.num_vector_embeds - 1} (inclusive).''' )
            latents = latents.to(self.device )
        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps , device=self.device )
        timesteps_tensor = self.scheduler.timesteps.to(self.device )
        sample = latents
        for i, t in enumerate(self.progress_bar(timesteps_tensor ) ):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(latent_model_input , encoder_hidden_states=prompt_embeds , timestep=t ).sample
            if do_classifier_free_guidance:
                model_output_uncond, model_output_text = model_output.chunk(2 )
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output , dim=1 , keepdim=True )
            model_output = self.truncate(model_output , truncation_rate )
            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70 )
            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output , timestep=t , sample=sample , generator=generator ).prev_sample
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i , t , sample )
        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample , shape=embeddings_shape )
        image = self.vqvae.decode(embeddings , force_not_quantize=True ).sample
        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
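    # `truncate` below implements the truncation-sampling trick from the VQ-Diffusion paper:
    # for every latent pixel, log-probabilities outside the smallest set of codebook tokens
    # whose cumulative probability reaches `truncation_rate` are set to -inf (log 0),
    # so sampling is restricted to the most likely tokens.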
    def UpperCAmelCase__ ( self : Optional[int] , log_p_x_0 , truncation_rate ):
        '''simple docstring'''
        sorted_log_p_x_0, indices = torch.sort(log_p_x_0 , 1 , descending=True )
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0 )
        keep_mask = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :] , True )
        keep_mask = torch.cat((all_true, keep_mask) , dim=1 )
        keep_mask = keep_mask[:, :-1, :]
        keep_mask = keep_mask.gather(1 , indices.argsort(1 ) )
        rv = log_p_x_0.clone()
        rv[~keep_mask] = -torch.inf # -inf = log(0)
        return rv
| 721
|
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
a = None
a = logging.get_logger(__name__)
a = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
a = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
},
'tokenizer_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/tokenizer.json',
't5-base': 'https://huggingface.co/t5-base/resolve/main/tokenizer.json',
't5-large': 'https://huggingface.co/t5-large/resolve/main/tokenizer.json',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/tokenizer.json',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/tokenizer.json',
},
}
# TODO(PVP) - this should be removed in Transformers v5
a = {
't5-small': 5_1_2,
't5-base': 5_1_2,
't5-large': 5_1_2,
't5-3b': 5_1_2,
't5-11b': 5_1_2,
}
class TaTokenizerFast( __magic_name__ ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE : int = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE : str = ['input_ids', 'attention_mask']
__SCREAMING_SNAKE_CASE : Dict = TaTokenizer
__SCREAMING_SNAKE_CASE : List[int] = []
    def __init__( self : int , vocab_file=None , tokenizer_file=None , eos_token="</s>" , unk_token="<unk>" , pad_token="<pad>" , extra_ids=100 , additional_special_tokens=None , **kwargs , ):
        '''simple docstring'''
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [F'''<extra_id_{i}>''' for i in range(extra_ids )]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            extra_tokens = len(set(filter(lambda x : bool("""extra_id_""" in str(x ) ) , additional_special_tokens ) ) )
            if extra_tokens != extra_ids:
                raise ValueError(
                    F'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
                    """ provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"""
                    """ tokens""" )
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , extra_ids=extra_ids , additional_special_tokens=additional_special_tokens , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        self._extra_ids = extra_ids
@staticmethod
    def UpperCAmelCase__ ( pretrained_model_name_or_path , max_model_length , init_max_model_length ):
'''simple docstring'''
if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
            deprecated_max_model_length = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
"""This tokenizer was incorrectly instantiated with a model max length of"""
F''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
""" behavior is kept to avoid breaking backwards compatibility when padding/encoding with"""
""" `truncation is True`.\n- Be aware that you SHOULD NOT rely on"""
F''' {pretrained_model_name_or_path} automatically truncating your input to'''
F''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
F''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
""" `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"""
""" instantiate this tokenizer with `model_max_length` set to your preferred value.""" , UpperCamelCase__ , )
return max_model_length
    def UpperCAmelCase__ ( self : int , save_directory : str , filename_prefix : Optional[str] = None ):
        '''simple docstring'''
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
                """tokenizer.""" )
        if not os.path.isdir(save_directory ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
            logger.info(F'''Copy vocab file to {out_vocab_file}''' )
        return (out_vocab_file,)
    def UpperCAmelCase__ ( self : int , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ):
        '''simple docstring'''
        token_ids_a = token_ids_a + [self.eos_token_id]
        if token_ids_b is None:
            return self.prefix_tokens + token_ids_a
        else:
            token_ids_b = token_ids_b + [self.eos_token_id]
            return self.prefix_tokens + token_ids_a + token_ids_b
    def UpperCAmelCase__ ( self : str , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ):
        '''simple docstring'''
        eos = [self.eos_token_id]
        if token_ids_b is None:
            return len(token_ids_a + eos ) * [0]
        return len(token_ids_a + eos + token_ids_b + eos ) * [0]
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
return list(
set(filter(lambda UpperCamelCase__ : bool(re.search(R"""<extra_id_\d+>""" , UpperCamelCase__ ) ) is not None , self.additional_special_tokens ) ) )
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
        return [self.convert_tokens_to_ids(token ) for token in self.get_sentinel_tokens()]
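    # Example (an illustrative sketch): with the default extra_ids=100 the two helpers above
    # return the "<extra_id_0>" ... "<extra_id_99>" sentinel strings (in no guaranteed order,
    # since they are collected via a set) and their corresponding vocabulary ids.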
| 650
| 0
|
def UpperCAmelCase_ ( n = 2_0_0_0_0_0_0 ):
    primality_list = [0 for i in range(n + 1 )]
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2 , int(n**0.5 ) + 1 ):
        if primality_list[i] == 0:
            for j in range(i * i , n + 1 , i ):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n ):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
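# The function above is a classic sieve of Eratosthenes: composites are marked with 1,
# so every index still holding 0 (apart from 0 and 1, marked up front) is prime. For the
# default limit of 2_000_000 the expected result is 142913828922 (Project Euler problem 10).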
if __name__ == "__main__":
print(F'''{solution() = }''')
| 700
|
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionDiffEditPipeline
__SCREAMING_SNAKE_CASE : Optional[Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'height', 'width', 'image'} | {'image_latents'}
__SCREAMING_SNAKE_CASE : List[str] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'image'} | {'image_latents'}
__SCREAMING_SNAKE_CASE : int = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__SCREAMING_SNAKE_CASE : Any = frozenset([] )
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=True , )
        scheduler = DDIMScheduler(
            beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=False , set_alpha_to_one=False , )
        inverse_scheduler = DDIMInverseScheduler(
            beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=False , set_alpha_to_zero=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="""gelu""" , projection_dim=512 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        components = {
"""unet""": unet,
"""scheduler""": scheduler,
"""inverse_scheduler""": inverse_scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def UpperCAmelCase__ ( self : int , UpperCamelCase__ : Any , UpperCamelCase__ : Any=0 ):
'''simple docstring'''
lowercase_ = floats_tensor((1, 16, 16) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
lowercase_ = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
if str(UpperCamelCase__ ).startswith("""mps""" ):
lowercase_ = torch.manual_seed(UpperCamelCase__ )
else:
lowercase_ = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
lowercase_ = {
"""prompt""": """a dog and a newt""",
"""mask_image""": mask,
"""image_latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 2,
"""inpaint_strength""": 1.0,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def UpperCAmelCase__ ( self : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : str=0 ):
'''simple docstring'''
lowercase_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
lowercase_ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowercase_ = Image.fromarray(np.uinta(UpperCamelCase__ ) ).convert("""RGB""" )
if str(UpperCamelCase__ ).startswith("""mps""" ):
lowercase_ = torch.manual_seed(UpperCamelCase__ )
else:
lowercase_ = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
lowercase_ = {
"""image""": image,
"""source_prompt""": """a cat and a frog""",
"""target_prompt""": """a dog and a newt""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""num_maps_per_mask""": 2,
"""mask_encode_strength""": 1.0,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def UpperCAmelCase__ ( self : List[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple=0 ):
'''simple docstring'''
lowercase_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
lowercase_ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowercase_ = Image.fromarray(np.uinta(UpperCamelCase__ ) ).convert("""RGB""" )
if str(UpperCamelCase__ ).startswith("""mps""" ):
lowercase_ = torch.manual_seed(UpperCamelCase__ )
else:
lowercase_ = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
lowercase_ = {
"""image""": image,
"""prompt""": """a cat and a frog""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""inpaint_strength""": 1.0,
"""guidance_scale""": 6.0,
"""decode_latents""": True,
"""output_type""": """numpy""",
}
return inputs
    def test_save_load_optional_components( self ):
        '''simple docstring'''
        if not hasattr(self.pipeline_class , """_optional_components""" ):
            return
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        # set all optional components to None and update pipeline config accordingly
        for optional_component in pipe._optional_components:
            setattr(pipe , optional_component , None )
        pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
        inputs = self.get_dummy_inputs(torch_device )
        output = pipe(**inputs )[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir )
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir )
        pipe_loaded.to(torch_device )
        pipe_loaded.set_progress_bar_config(disable=None )
        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded , optional_component ) is None , F'''`{optional_component}` did not stay set to None after loading.''' , )
        inputs = self.get_dummy_inputs(torch_device )
        output_loaded = pipe_loaded(**inputs )[0]
        max_diff = np.abs(output - output_loaded ).max()
        self.assertLess(max_diff , 1e-4 )

    def test_mask( self ):
        '''simple docstring'''
        device = """cpu"""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_mask_inputs(device )
        mask = pipe.generate_mask(**inputs )
        mask_slice = mask[0, -3:, -3:]
        self.assertEqual(mask.shape , (1, 16, 16) )
        expected_slice = np.array([0] * 9 )
        max_diff = np.abs(mask_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(max_diff , 1e-3 )
        self.assertEqual(mask[0, -3, -4] , 0 )

    def test_inversion( self ):
        '''simple docstring'''
        device = """cpu"""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inversion_inputs(device )
        image = pipe.invert(**inputs ).images
        image_slice = image[0, -1, -3:, -3:]
        self.assertEqual(image.shape , (2, 32, 32, 3) )
        expected_slice = np.array(
            [0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , )
        max_diff = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(max_diff , 1e-3 )

    def test_inference_batch_single_identical( self ):
        '''simple docstring'''
        super().test_inference_batch_single_identical(expected_max_diff=5e-3 )

    def test_inversion_dpm( self ):
        '''simple docstring'''
        device = """cpu"""
        components = self.get_dummy_components()
        scheduler_args = {"""beta_start""": 0.00_085, """beta_end""": 0.012, """beta_schedule""": """scaled_linear"""}
        components["""scheduler"""] = DPMSolverMultistepScheduler(**scheduler_args )
        components["""inverse_scheduler"""] = DPMSolverMultistepInverseScheduler(**scheduler_args )
        pipe = self.pipeline_class(**components )
        pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inversion_inputs(device )
        image = pipe.invert(**inputs ).images
        image_slice = image[0, -1, -3:, -3:]
        self.assertEqual(image.shape , (2, 32, 32, 3) )
        expected_slice = np.array(
            [0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , )
        max_diff = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(max_diff , 1e-3 )
@require_torch_gpu
@slow
class UpperCamelCase__ ( unittest.TestCase ):
    def tearDown( self ):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @classmethod
    def setUpClass( cls ):
        '''simple docstring'''
        raw_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png""" )
        raw_image = raw_image.convert("""RGB""" ).resize((768, 768) )
        cls.raw_image = raw_image

    def test_stable_diffusion_diffedit_full( self ):
        '''simple docstring'''
        generator = torch.manual_seed(0 )
        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            """stabilityai/stable-diffusion-2-1""" , safety_checker=None , torch_dtype=torch.float16 )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config )
        pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None )
        source_prompt = """a bowl of fruit"""
        target_prompt = """a bowl of pears"""
        mask_image = pipe.generate_mask(
            image=self.raw_image , source_prompt=source_prompt , target_prompt=target_prompt , generator=generator , )
        inv_latents = pipe.invert(
            prompt=source_prompt , image=self.raw_image , inpaint_strength=0.7 , generator=generator ).latents
        image = pipe(
            prompt=target_prompt , mask_image=mask_image , image_latents=inv_latents , generator=generator , negative_prompt=source_prompt , inpaint_strength=0.7 , output_type="""numpy""" , ).images[0]
        expected_image = (
            np.array(
                load_image(
                    """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
                    """/diffedit/pears.png""" ).resize((768, 768) ) )
            / 255
        )
        assert np.abs((expected_image - image).max() ) < 5e-1

    def test_stable_diffusion_diffedit_dpm( self ):
        '''simple docstring'''
        generator = torch.manual_seed(0 )
        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            """stabilityai/stable-diffusion-2-1""" , safety_checker=None , torch_dtype=torch.float16 )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe.inverse_scheduler = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None )
        source_prompt = """a bowl of fruit"""
        target_prompt = """a bowl of pears"""
        mask_image = pipe.generate_mask(
            image=self.raw_image , source_prompt=source_prompt , target_prompt=target_prompt , generator=generator , )
        inv_latents = pipe.invert(
            prompt=source_prompt , image=self.raw_image , inpaint_strength=0.7 , generator=generator , num_inference_steps=25 , ).latents
        image = pipe(
            prompt=target_prompt , mask_image=mask_image , image_latents=inv_latents , generator=generator , negative_prompt=source_prompt , inpaint_strength=0.7 , num_inference_steps=25 , output_type="""numpy""" , ).images[0]
        expected_image = (
            np.array(
                load_image(
                    """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
                    """/diffedit/pears.png""" ).resize((768, 768) ) )
            / 255
        )
        assert np.abs((expected_image - image).max() ) < 5e-1
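# Pipeline note (added): the tests above exercise DiffEdit's three stages in
# order -- generate_mask() contrasts the source and target prompts to produce
# an edit mask, invert() runs deterministic DDIM/DPM inversion to obtain
# editable latents, and the final __call__ inpaints the masked region toward
# the target prompt.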
| 650
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
_import_structure = {'tokenization_herbert': ['HerbertTokenizer']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_herbert_fast'] = ['HerbertTokenizerFast']
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
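# Note (added): the _LazyModule registered above defers the heavy tokenizer
# imports until an attribute is first accessed, so importing this module stays
# cheap even when the optional `tokenizers` dependency is missing.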
| 701
|
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
a = logging.get_logger(__name__)
class UpperCamelCase__ ( BaseImageProcessor ):
    model_input_names = ['pixel_values']

    def __init__( self , do_rescale: bool = True , rescale_factor: Union[int, float] = 1 / 255 , do_pad: bool = True , pad_size: int = 8 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale( self , image: np.ndarray , scale: float , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs ):
        '''simple docstring'''
        return rescale(image , scale=scale , data_format=data_format , **kwargs )

    def pad( self , image: np.ndarray , size: int , data_format: Optional[Union[str, ChannelDimension]] = None ):
        '''simple docstring'''
        old_height , old_width = get_image_size(image )
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image , ((0, pad_height), (0, pad_width)) , mode="""symmetric""" , data_format=data_format )

    def preprocess( self , images: ImageInput , do_rescale: Optional[bool] = None , rescale_factor: Optional[float] = None , do_pad: Optional[bool] = None , pad_size: Optional[int] = None , return_tensors: Optional[Union[str, TensorType]] = None , data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST , **kwargs , ):
        '''simple docstring'''
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_pad:
            images = [self.pad(image , size=pad_size ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"""pixel_values""": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
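# Minimal usage sketch (added; `UpperCamelCase__` is the placeholder class name
# used throughout this dump, and the shapes below are illustrative assumptions):
#
#     from PIL import Image
#     processor = UpperCamelCase__()
#     image = Image.fromarray(np.zeros((17, 21, 3), dtype=np.uint8))
#     batch = processor.preprocess(image, return_tensors="np")
#     # height and width are padded up to a multiple of pad_size (8)
#     assert batch["pixel_values"].shape[-2] % 8 == 0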
| 650
| 0
|
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class FlaxUNetaDConditionOutput( BaseOutput ):
    sample: jnp.ndarray
@flax_register_to_config
class UpperCamelCase__ ( nn.Module , FlaxModelMixin , ConfigMixin ):
    sample_size: int = 32
    in_channels: int = 4
    out_channels: int = 4
    down_block_types: Tuple[str] = (
        'CrossAttnDownBlock2D',
        'CrossAttnDownBlock2D',
        'CrossAttnDownBlock2D',
        'DownBlock2D',
    )
    up_block_types: Tuple[str] = ('UpBlock2D', 'CrossAttnUpBlock2D', 'CrossAttnUpBlock2D', 'CrossAttnUpBlock2D')
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    use_memory_efficient_attention: bool = False
    def init_weights( self , rng: jax.Array ) -> FrozenDict:
        '''simple docstring'''
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape , dtype=jnp.float32 )
        timesteps = jnp.ones((1,) , dtype=jnp.int32 )
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.float32 )
        params_rng , dropout_rng = jax.random.split(rng )
        rngs = {"""params""": params_rng, """dropout""": dropout_rng}
        return self.init(rngs , sample , timesteps , encoder_hidden_states )["params"]
    def setup( self ):
        '''simple docstring'''
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4
        if self.num_attention_heads is not None:
            raise ValueError(
                """At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.""" )
        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim
        # input
        self.conv_in = nn.Conv(
            block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim , dtype=self.dtype )
        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention , bool ):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types )
        if isinstance(num_attention_heads , int ):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types )
        # down
        down_blocks = []
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(self.down_block_types ):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels ) - 1
            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlockaD(
                    in_channels=input_channel , out_channels=output_channel , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            else:
                down_block = FlaxDownBlockaD(
                    in_channels=input_channel , out_channels=output_channel , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
            down_blocks.append(down_block )
        self.down_blocks = down_blocks
        # mid
        self.mid_block = FlaxUNetMidBlockaDCrossAttn(
            in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
        # up
        up_blocks = []
        reversed_block_out_channels = list(reversed(block_out_channels ) )
        reversed_num_attention_heads = list(reversed(num_attention_heads ) )
        only_cross_attention = list(reversed(only_cross_attention ) )
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(self.up_block_types ):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            input_channel = reversed_block_out_channels[min(i + 1 , len(block_out_channels ) - 1 )]
            is_final_block = i == len(block_out_channels ) - 1
            if up_block_type == "CrossAttnUpBlock2D":
                up_block = FlaxCrossAttnUpBlockaD(
                    in_channels=input_channel , out_channels=output_channel , prev_output_channel=prev_output_channel , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            else:
                up_block = FlaxUpBlockaD(
                    in_channels=input_channel , out_channels=output_channel , prev_output_channel=prev_output_channel , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
            up_blocks.append(up_block )
            prev_output_channel = output_channel
        self.up_blocks = up_blocks
        # out
        self.conv_norm_out = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
        self.conv_out = nn.Conv(
            self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
    def __call__( self , sample , timesteps , encoder_hidden_states , down_block_additional_residuals=None , mid_block_additional_residual=None , return_dict: bool = True , train: bool = False , ):
        '''simple docstring'''
        # 1. time
        if not isinstance(timesteps , jnp.ndarray ):
            timesteps = jnp.array([timesteps] , dtype=jnp.int32 )
        elif isinstance(timesteps , jnp.ndarray ) and len(timesteps.shape ) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32 )
            timesteps = jnp.expand_dims(timesteps , 0 )
        t_emb = self.time_proj(timesteps )
        t_emb = self.time_embedding(t_emb )
        # 2. pre-process
        sample = jnp.transpose(sample , (0, 2, 3, 1) )
        sample = self.conv_in(sample )
        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block , FlaxCrossAttnDownBlockaD ):
                sample , res_samples = down_block(sample , t_emb , encoder_hidden_states , deterministic=not train )
            else:
                sample , res_samples = down_block(sample , t_emb , deterministic=not train )
            down_block_res_samples += res_samples
        if down_block_additional_residuals is not None:
            new_down_block_res_samples = ()
            for down_block_res_sample, down_block_additional_residual in zip(
                down_block_res_samples , down_block_additional_residuals ):
                down_block_res_sample += down_block_additional_residual
                new_down_block_res_samples += (down_block_res_sample,)
            down_block_res_samples = new_down_block_res_samples
        # 4. mid
        sample = self.mid_block(sample , t_emb , encoder_hidden_states , deterministic=not train )
        if mid_block_additional_residual is not None:
            sample += mid_block_additional_residual
        # 5. up
        for up_block in self.up_blocks:
            res_samples = down_block_res_samples[-(self.layers_per_block + 1) :]
            down_block_res_samples = down_block_res_samples[: -(self.layers_per_block + 1)]
            if isinstance(up_block , FlaxCrossAttnUpBlockaD ):
                sample = up_block(
                    sample , temb=t_emb , encoder_hidden_states=encoder_hidden_states , res_hidden_states_tuple=res_samples , deterministic=not train , )
            else:
                sample = up_block(sample , temb=t_emb , res_hidden_states_tuple=res_samples , deterministic=not train )
        # 6. post-process
        sample = self.conv_norm_out(sample )
        sample = nn.silu(sample )
        sample = self.conv_out(sample )
        sample = jnp.transpose(sample , (0, 3, 1, 2) )
        if not return_dict:
            return (sample,)
        return FlaxUNetaDConditionOutput(sample=sample )
| 702
|
def UpperCAmelCase_ ( UpperCAmelCase__ ):
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
raise TypeError("""Input value must be an 'int' type""" )
lowercase_ = 0
while number:
position += 1
number >>= 1
return position
if __name__ == "__main__":
import doctest
doctest.testmod()
| 650
| 0
|
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
a = """true"""
def get_basic_setup(accelerator , num_samples=8_2 , batch_size=1_6 ):
    set_seed(4_2 )
    model = RegressionModel()
    ddp_model = deepcopy(model )
    dset = RegressionDataset(length=num_samples )
    dataloader = DataLoader(dset , batch_size=batch_size )
    model.to(accelerator.device )
    ddp_model , dataloader = accelerator.prepare(ddp_model , dataloader )
    return model, ddp_model, dataloader
def get_dataloader(accelerator , use_longest=False ):
    tokenizer = AutoTokenizer.from_pretrained("""hf-internal-testing/mrpc-bert-base-cased""" )
    dataset = load_dataset("""glue""" , """mrpc""" , split="""validation""" )

    def tokenize_function(examples ):
        outputs = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=True , max_length=None )
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function , batched=True , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
    tokenized_datasets = tokenized_datasets.rename_column("""label""" , """labels""" )

    def collate_fn(examples ):
        if use_longest:
            return tokenizer.pad(examples , padding="""longest""" , return_tensors="""pt""" )
        return tokenizer.pad(examples , padding="""max_length""" , max_length=1_2_8 , return_tensors="""pt""" )

    return DataLoader(tokenized_datasets , shuffle=False , collate_fn=collate_fn , batch_size=1_6 )
def get_mrpc_setup(dispatch_batches , split_batches ):
    accelerator = Accelerator(dispatch_batches=dispatch_batches , split_batches=split_batches )
    dataloader = get_dataloader(accelerator , not dispatch_batches )
    model = AutoModelForSequenceClassification.from_pretrained(
        """hf-internal-testing/mrpc-bert-base-cased""" , return_dict=True )
    ddp_model , ddp_dataloader = accelerator.prepare(model , dataloader )
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions(model , dataloader , accelerator ):
    logits_and_targets = []
    for batch in dataloader:
        input , target = batch.values()
        with torch.no_grad():
            logit = model(input )
        logit , target = accelerator.gather_for_metrics((logit, target) )
        logits_and_targets.append((logit, target) )
    logits , targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit )
        targs.append(targ )
    logits , targs = torch.cat(logits ), torch.cat(targs )
    return logits, targs
def test_torch_metrics(accelerator , num_samples=8_2 , dispatch_batches=False , split_batches=False , batch_size=1_6 ):
    model , ddp_model , dataloader = get_basic_setup(accelerator , num_samples , batch_size )
    logits , targs = generate_predictions(ddp_model , dataloader , accelerator )
    assert (
        len(logits ) == num_samples
    ), F'''Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits )}'''
def test_mrpc(dispatch_batches = False , split_batches = False ):
    metric = evaluate.load("""glue""" , """mrpc""" )
    setup , accelerator = get_mrpc_setup(dispatch_batches , split_batches )
    # First do baseline
    model , dataloader , device = setup["no"]
    model.to(device )
    model.eval()
    for batch in dataloader:
        batch.to(device )
        with torch.inference_mode():
            outputs = model(**batch )
        preds = outputs.logits.argmax(dim=-1 )
        metric.add_batch(predictions=preds , references=batch["""labels"""] )
    baseline = metric.compute()
    # Then do distributed
    model , dataloader , device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch )
        preds = outputs.logits.argmax(dim=-1 )
        references = batch["labels"]
        preds , references = accelerator.gather_for_metrics((preds, references) )
        metric.add_batch(predictions=preds , references=references )
    distributed = metric.compute()
    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key] , distributed[key] ), F'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'''
def main():
    accelerator = Accelerator(split_batches=False , dispatch_batches=False )
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("""**Testing gather_for_metrics**""" )
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''' )
                test_mrpc(dispatch_batches , split_batches )
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("""**Test torch metrics**""" )
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches , dispatch_batches=dispatch_batches )
            if accelerator.is_local_main_process:
                print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''' )
            test_torch_metrics(accelerator , 9_9 )
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("""**Test last batch is not dropped when perfectly divisible**""" )
    accelerator = Accelerator()
    test_torch_metrics(accelerator , 5_1_2 )
    accelerator.state._reset_state()
def _mp_fn(index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
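# Usage note (added): this script is designed to be launched through
# `accelerate launch` so that `gather_for_metrics` is exercised across several
# processes; on a single process the gathers degenerate to no-ops.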
| 703
|
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UpperCamelCase__ ( ModelMixin , ConfigMixin , ModuleUtilsMixin ):
    @register_to_config
    def __init__( self , max_length: int , vocab_size: int , d_model: int , dropout_rate: float , num_layers: int , num_heads: int , d_kv: int , d_ff: int , feed_forward_proj: str , is_decoder: bool = False , ):
        '''simple docstring'''
        super().__init__()
        self.token_embedder = nn.Embedding(vocab_size , d_model )
        self.position_encoding = nn.Embedding(max_length , d_model )
        self.position_encoding.weight.requires_grad = False
        self.dropout_pre = nn.Dropout(p=dropout_rate )
        t5config = TaConfig(
            vocab_size=vocab_size , d_model=d_model , num_heads=num_heads , d_kv=d_kv , d_ff=d_ff , dropout_rate=dropout_rate , feed_forward_proj=feed_forward_proj , is_decoder=is_decoder , is_encoder_decoder=False , )
        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers ):
            lyr = TaBlock(t5config )
            self.encoders.append(lyr )
        self.layer_norm = TaLayerNorm(d_model )
        self.dropout_post = nn.Dropout(p=dropout_rate )
    def forward( self , encoder_input_tokens , encoder_inputs_mask ):
        '''simple docstring'''
        x = self.token_embedder(encoder_input_tokens )
        seq_length = encoder_input_tokens.shape[1]
        input_positions = torch.arange(seq_length , device=encoder_input_tokens.device )
        x += self.position_encoding(input_positions )
        x = self.dropout_pre(x )
        # invert the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask , input_shape )
        for lyr in self.encoders:
            x = lyr(x , extended_attention_mask )[0]
        x = self.layer_norm(x )
        return self.dropout_post(x ), encoder_inputs_mask
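# Shape sanity sketch (added; the tiny hyperparameters below are illustrative
# assumptions, not values from the original file):
#
#     enc = UpperCamelCase__(max_length=64, vocab_size=100, d_model=32,
#                            dropout_rate=0.1, num_layers=1, num_heads=2,
#                            d_kv=16, d_ff=64, feed_forward_proj="gated-gelu")
#     tokens = torch.zeros(2, 64, dtype=torch.long)
#     mask = torch.ones(2, 64, dtype=torch.long)
#     out, out_mask = enc(tokens, mask)  # out has shape (2, 64, 32)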
| 650
| 0
|
import operator as op
def solve(post_fix ):
    stack = []
    div = lambda x , y: int(x / y )  # noqa: E731 integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation
    # print table header
    print("""Symbol""".center(8 ) , """Action""".center(1_2 ) , """Stack""" , sep=""" | """ )
    print("""-""" * (3_0 + len(post_fix )) )
    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x )  # append x to stack
            # output in tabular format
            print(x.rjust(8 ) , ("""push(""" + x + """)""").ljust(1_2 ) , """,""".join(stack ) , sep=""" | """ )
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print("""""".rjust(8 ) , ("""pop(""" + b + """)""").ljust(1_2 ) , """,""".join(stack ) , sep=""" | """ )
            a = stack.pop()  # pop stack
            # output in tabular format
            print("""""".rjust(8 ) , ("""pop(""" + a + """)""").ljust(1_2 ) , """,""".join(stack ) , sep=""" | """ )
            stack.append(
                str(opr[x](int(a ) , int(b ) ) ) )  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(
                x.rjust(8 ) , ("""push(""" + a + x + b + """)""").ljust(1_2 ) , """,""".join(stack ) , sep=""" | """ , )
    return int(stack[0] )


if __name__ == "__main__":
    Postfix = input('\n\nEnter a Postfix Equation (space separated) = ').split(' ')
    print('\n\tResult = ', solve(Postfix))
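    # Example (added): entering "5 6 9 * +" prints the evaluation table and
    # yields 5 + (6 * 9) = 59.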
| 704
|
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
a = TypeVar('T')
class LRUCache(Generic[T] ):
    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__( self , n: int ):
        '''simple docstring'''
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("""n should be an integer greater than 0.""" )
        else:
            LRUCache._MAX_CAPACITY = n

    def refer( self , x: T ):
        '''simple docstring'''
        if x not in self.key_reference:
            if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element )
        else:
            self.dq_store.remove(x )
        self.dq_store.appendleft(x )
        self.key_reference.add(x )

    def display( self ):
        '''simple docstring'''
        for k in self.dq_store:
            print(k )

    def __repr__( self ):
        '''simple docstring'''
        return F'''LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}'''


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    lru_cache = LRUCache(4 )
    lru_cache.refer("""A""" )
    lru_cache.refer(2 )
    lru_cache.refer(3 )
    lru_cache.refer("""A""" )
    lru_cache.refer(4 )
    lru_cache.refer(5 )
    lru_cache.display()
    print(lru_cache )
    assert str(lru_cache ) == "LRUCache(4) => [5, 4, 'A', 3]"
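    # Walkthrough (added): after referring A, 2, 3, A, 4 and 5 with capacity 4,
    # the key 2 is the least recently used and gets evicted, which is exactly
    # the [5, 4, 'A', 3] ordering asserted above.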
| 650
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class UpperCamelCase__ ( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = ShapEImgaImgPipeline
    batch_params = ['image']
    params = ['image']
    required_optional_params = [
        'num_images_per_prompt',
        'num_inference_steps',
        'generator',
        'latents',
        'guidance_scale',
        'frame_size',
        'output_type',
        'return_dict',
    ]
    test_gpu_offload = False

    @property
    def text_embedder_hidden_size( self ):
        '''simple docstring'''
        return 32

    @property
    def time_input_dim( self ):
        '''simple docstring'''
        return 32

    @property
    def time_embed_dim( self ):
        '''simple docstring'''
        return self.time_input_dim * 4

    @property
    def renderer_dim( self ):
        '''simple docstring'''
        return 8

    @property
    def dummy_image_encoder( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
        model = CLIPVisionModel(config )
        return model

    @property
    def dummy_image_processor( self ):
        '''simple docstring'''
        image_processor = CLIPImageProcessor(
            crop_size=224 , do_center_crop=True , do_normalize=True , do_resize=True , image_mean=[0.48_145_466, 0.4_578_275, 0.40_821_073] , image_std=[0.26_862_954, 0.26_130_258, 0.27_577_711] , resample=3 , size=224 , )
        return image_processor

    @property
    def dummy_prior( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        model_kwargs = {
            """num_attention_heads""": 2,
            """attention_head_dim""": 16,
            """embedding_dim""": self.time_input_dim,
            """num_embeddings""": 32,
            """embedding_proj_dim""": self.text_embedder_hidden_size,
            """time_embed_dim""": self.time_embed_dim,
            """num_layers""": 1,
            """clip_embed_dim""": self.time_input_dim * 2,
            """additional_embeddings""": 0,
            """time_embed_act_fn""": """gelu""",
            """norm_in_type""": """layer""",
            """embedding_proj_norm_type""": """layer""",
            """encoder_hid_proj_type""": None,
            """added_emb_type""": None,
        }
        model = PriorTransformer(**model_kwargs )
        return model

    @property
    def dummy_renderer( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        model_kwargs = {
            """param_shapes""": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            """d_latent""": self.time_input_dim,
            """d_hidden""": self.renderer_dim,
            """n_output""": 12,
            """background""": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs )
        return model

    def get_dummy_components( self ):
        '''simple docstring'''
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule="""exp""" , num_train_timesteps=1_024 , prediction_type="""sample""" , use_karras_sigmas=True , clip_sample=True , clip_sample_range=1.0 , )
        components = {
            """prior""": prior,
            """image_encoder""": image_encoder,
            """image_processor""": image_processor,
            """renderer""": renderer,
            """scheduler""": scheduler,
        }
        return components

    def get_dummy_inputs( self , device , seed=0 ):
        '''simple docstring'''
        input_image = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            """image""": input_image,
            """generator""": generator,
            """num_inference_steps""": 1,
            """frame_size""": 32,
            """output_type""": """np""",
        }
        return inputs
    def test_shap_e( self ):
        '''simple docstring'''
        device = """cpu"""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
            [
                0.00_039_216,
                0.00_039_216,
                0.00_039_216,
                0.00_039_216,
                0.00_039_216,
                0.00_039_216,
                0.00_039_216,
                0.00_039_216,
                0.00_039_216,
            ] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    def test_inference_batch_consistent( self ):
        '''simple docstring'''
        self._test_inference_batch_consistent(batch_sizes=[1, 2] )

    def test_inference_batch_single_identical( self ):
        '''simple docstring'''
        test_max_difference = torch_device == """cpu"""
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2 , test_max_difference=test_max_difference , relax_max_difference=relax_max_difference , )

    def test_num_images_per_prompt( self ):
        '''simple docstring'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device )
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs , num_images_per_prompt=num_images_per_prompt )[0]
        assert images.shape[0] == batch_size * num_images_per_prompt


@slow
@require_torch_gpu
class UpperCamelCase__ ( unittest.TestCase ):
    def tearDown( self ):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e_img2img( self ):
        '''simple docstring'''
        input_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/shap_e/corgi.png""" )
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/shap_e/test_shap_e_img2img_out.npy""" )
        pipe = ShapEImgaImgPipeline.from_pretrained("""openai/shap-e-img2img""" )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.Generator(device=torch_device ).manual_seed(0 )
        images = pipe(
            input_image , generator=generator , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type="""np""" , ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images , expected_image )
| 705
|
def reverse_words(input_str: str ) -> str:
    return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
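    # Illustrative check (added): word order is reversed, characters are not.
    assert reverse_words("""hello world""" ) == """world hello"""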
| 650
| 0
|
def fizz_buzz(number: int , iterations: int ) -> str:
    if not isinstance(iterations , int ):
        raise ValueError("""iterations must be defined as integers""" )
    if not isinstance(number , int ) or not number >= 1:
        raise ValueError(
            """starting number must be an integer and be more than 0""" )
    if not iterations >= 1:
        raise ValueError("""Iterations must be done more than 0 times to play FizzBuzz""" )
    out = """"""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number )
        # print(out)
        number += 1
        out += " "
    return out
if __name__ == "__main__":
import doctest
doctest.testmod()
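    # Illustrative check (added): every entry carries a trailing space, and
    # multiples of 3 and 5 are replaced by Fizz/Buzz respectively.
    assert fizz_buzz(1 , 7 ) == "1 2 Fizz 4 Buzz Fizz 7 "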
| 706
|
def solution(limit: int = 2_8_1_2_3 ) -> int:
    sum_divs = [1] * (limit + 1 )
    for i in range(2 , int(limit**0.5 ) + 1 ):
        sum_divs[i * i] += i
        for k in range(i + 1 , limit // i + 1 ):
            sum_divs[k * i] += k + i
    abundants = set()
    res = 0
    for n in range(1 , limit + 1 ):
        if sum_divs[n] > n:
            abundants.add(n )
        if not any((n - a in abundants) for a in abundants ):
            res += n
    return res
if __name__ == "__main__":
print(solution())
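    # Context (added): with the default limit of 28123 this is Project Euler
    # problem 23 -- the sum of all positive integers that cannot be written as
    # the sum of two abundant numbers.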
| 650
| 0
|
a = '\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
a = [{'type': 'code', 'content': INSTALL_CONTENT}]
a = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 707
|
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="""gelu""" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config( self ):
        '''simple docstring'''
        return OpenLlamaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , use_stable_embedding=True , )

    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = OpenLlamaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_model_as_decoder( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        '''simple docstring'''
        config.add_cross_attention = True
        model = OpenLlamaModel(config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , )
        result = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , )
        result = model(input_ids , attention_mask=input_mask )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_for_causal_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        '''simple docstring'''
        model = OpenLlamaForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def create_and_check_decoder_model_past_large_inputs( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        '''simple docstring'''
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        # first forward pass
        outputs = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , use_cache=True , )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_mask = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
        next_attention_mask = torch.cat([input_mask, next_mask] , dim=-1 )
        output_from_no_past = model(
            next_input_ids , attention_mask=next_attention_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , output_hidden_states=True , )["""hidden_states"""][0]
        output_from_past = model(
            next_tokens , attention_mask=next_attention_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , past_key_values=past_key_values , output_hidden_states=True , )["""hidden_states"""][0]
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice , output_from_past_slice , atol=1e-3 ) )

    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class UpperCamelCase__ ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            'feature-extraction': OpenLlamaModel,
            'text-classification': OpenLlamaForSequenceClassification,
            'text-generation': OpenLlamaForCausalLM,
            'zero-shot': OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp( self ):
        '''simple docstring'''
        self.model_tester = OpenLlamaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=OpenLlamaConfig , hidden_size=37 )

    def test_config( self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def test_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_model_various_embeddings( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )

    def test_open_llama_sequence_classification_model( self ):
        '''simple docstring'''
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["""input_ids"""]
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        model = OpenLlamaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )

    def test_open_llama_sequence_classification_model_for_single_label( self ):
        '''simple docstring'''
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = """single_label_classification"""
        input_ids = input_dict["""input_ids"""]
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        model = OpenLlamaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )

    def test_open_llama_sequence_classification_model_for_multi_label( self ):
        '''simple docstring'''
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = """multi_label_classification"""
        input_ids = input_dict["""input_ids"""]
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
        model = OpenLlamaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )

    @unittest.skip("""Open-Llama buffers include complex numbers, which breaks this test""" )
    def test_save_load_fast_init_from_base( self ):
        '''simple docstring'''
        pass

    @parameterized.expand([("""linear""",), ("""dynamic""",)] )
    def test_model_rope_scaling( self , scaling_type ):
        '''simple docstring'''
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10] , config.vocab_size )
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config )
        original_model.to(torch_device )
        original_model.eval()
        original_short_output = original_model(short_input ).last_hidden_state
        original_long_output = original_model(long_input ).last_hidden_state
        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"""type""": scaling_type, """factor""": 10.0}
        scaled_model = OpenLlamaModel(config )
        scaled_model.to(torch_device )
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input ).last_hidden_state
        scaled_long_output = scaled_model(long_input ).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output , scaled_short_output , atol=1e-5 ) )
        else:
            self.assertFalse(torch.allclose(original_short_output , scaled_short_output , atol=1e-5 ) )
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output , scaled_long_output , atol=1e-5 ) )
| 650
| 0
|
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
a = logging.getLogger(__name__)
def dummy_dataloaders(a=2 , b=3 , batch_size=1_6 , n_train_batches: int = 1_0 , n_valid_batches: int = 2 ):
    def get_dataset(n_batches ):
        x = torch.randn(batch_size * n_batches , 1 )
        return TensorDataset(x , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )

    train_dataset = get_dataset(n_train_batches )
    valid_dataset = get_dataset(n_valid_batches )
    train_dataloader = DataLoader(train_dataset , shuffle=True , batch_size=batch_size , num_workers=4 )
    valid_dataloader = DataLoader(valid_dataset , shuffle=False , batch_size=batch_size , num_workers=4 )
    return (train_dataloader, valid_dataloader)


def train(num_epochs , model , dataloader , optimizer , accelerator , scheduler=None ):
    rands = []
    for epoch in range(num_epochs ):
        # Train quickly
        model.train()
        for batch in dataloader:
            x , y = batch
            outputs = model(x )
            loss = torch.nn.functional.mse_loss(outputs , y )
            accelerator.backward(loss )
            optimizer.step()
            optimizer.zero_grad()
        rands.append(random.random() )  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands


class DummyModel(nn.Module ):
    def __init__( self ):
        '''simple docstring'''
        super().__init__()
        self.a = nn.Parameter(torch.randn(1 ) )
        self.b = nn.Parameter(torch.randn(1 ) )

    def forward( self , x ):
        '''simple docstring'''
        return x * self.a + self.b
class UpperCamelCase__ ( unittest.TestCase ):
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
lowercase_ = DummyModel()
lowercase_ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
lowercase_ = dummy_dataloaders()
lowercase_ = ProjectConfiguration(total_limit=1 , project_dir=lowercase_ , automatic_checkpoint_naming=lowercase_ )
# Train baseline
lowercase_ = Accelerator(project_config=lowercase_ )
lowercase_ = accelerator.prepare(
lowercase_ , lowercase_ , lowercase_ , lowercase_ )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
lowercase_ = DummyModel()
lowercase_ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
lowercase_ = dummy_dataloaders()
# Train baseline
lowercase_ = Accelerator()
lowercase_ = accelerator.prepare(
lowercase_ , lowercase_ , lowercase_ , lowercase_ )
# Save initial
lowercase_ = os.path.join(lowercase_ , """initial""" )
accelerator.save_state(lowercase_ )
            lowercase_ , lowercase_ = model.a.item(), model.b.item()
lowercase_ = optimizer.state_dict()
lowercase_ = train(3 , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
            lowercase_ , lowercase_ = model.a.item(), model.b.item()
lowercase_ = optimizer.state_dict()
# Train partially
set_seed(42 )
lowercase_ = DummyModel()
lowercase_ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
lowercase_ = dummy_dataloaders()
lowercase_ = Accelerator()
lowercase_ = accelerator.prepare(
lowercase_ , lowercase_ , lowercase_ , lowercase_ )
accelerator.load_state(lowercase_ )
            lowercase_ , lowercase_ = model.a.item(), model.b.item()
lowercase_ = optimizer.state_dict()
self.assertEqual(lowercase_ , lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
lowercase_ = train(2 , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
# Save everything
lowercase_ = os.path.join(lowercase_ , """checkpoint""" )
accelerator.save_state(lowercase_ )
# Load everything back in and make sure all states work
accelerator.load_state(lowercase_ )
test_rands += train(1 , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
            lowercase_ , lowercase_ = model.a.item(), model.b.item()
lowercase_ = optimizer.state_dict()
self.assertEqual(lowercase_ , lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
lowercase_ = DummyModel()
lowercase_ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
lowercase_ = dummy_dataloaders()
lowercase_ = ProjectConfiguration(automatic_checkpoint_naming=lowercase_ )
# Train baseline
lowercase_ = Accelerator(project_dir=lowercase_ , project_config=lowercase_ )
lowercase_ = accelerator.prepare(
lowercase_ , lowercase_ , lowercase_ , lowercase_ )
# Save initial
accelerator.save_state()
            lowercase_ , lowercase_ = model.a.item(), model.b.item()
lowercase_ = optimizer.state_dict()
lowercase_ = train(3 , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
            lowercase_ , lowercase_ = model.a.item(), model.b.item()
lowercase_ = optimizer.state_dict()
# Train partially
set_seed(42 )
lowercase_ = DummyModel()
lowercase_ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
lowercase_ = dummy_dataloaders()
lowercase_ = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=lowercase_ )
lowercase_ = Accelerator(project_dir=lowercase_ , project_config=lowercase_ )
lowercase_ = accelerator.prepare(
lowercase_ , lowercase_ , lowercase_ , lowercase_ )
accelerator.load_state(os.path.join(lowercase_ , """checkpoints""" , """checkpoint_0""" ) )
            lowercase_ , lowercase_ = model.a.item(), model.b.item()
lowercase_ = optimizer.state_dict()
self.assertEqual(lowercase_ , lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
lowercase_ = train(2 , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(lowercase_ , """checkpoints""" , """checkpoint_1""" ) )
test_rands += train(1 , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
            lowercase_ , lowercase_ = model.a.item(), model.b.item()
lowercase_ = optimizer.state_dict()
self.assertEqual(lowercase_ , lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
lowercase_ = torch.tensor([1, 2, 3] )
lowercase_ = torch.tensor([2, 3, 4] )
lowercase_ = DummyModel()
lowercase_ = torch.optim.Adam(net.parameters() )
lowercase_ = Accelerator()
with self.assertRaises(lowercase_ ) as ve:
accelerator.register_for_checkpointing(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
lowercase_ = str(ve.exception )
self.assertTrue("""Item at index 0""" in message )
self.assertTrue("""Item at index 1""" in message )
self.assertFalse("""Item at index 2""" in message )
self.assertFalse("""Item at index 3""" in message )
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
lowercase_ = DummyModel()
lowercase_ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
lowercase_ = torch.optim.lr_scheduler.StepLR(lowercase_ , step_size=1 , gamma=0.99 )
lowercase_ = dummy_dataloaders()
lowercase_ = ProjectConfiguration(automatic_checkpoint_naming=lowercase_ )
# Train baseline
lowercase_ = Accelerator(project_dir=lowercase_ , project_config=lowercase_ )
lowercase_ = accelerator.prepare(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
# Save initial
accelerator.save_state()
lowercase_ = scheduler.state_dict()
train(3 , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
self.assertNotEqual(lowercase_ , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(lowercase_ , """checkpoints""" , """checkpoint_0""" ) )
self.assertEqual(lowercase_ , scheduler.state_dict() )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
lowercase_ = DummyModel()
lowercase_ = ProjectConfiguration(automatic_checkpoint_naming=lowercase_ , total_limit=2 )
# Train baseline
lowercase_ = Accelerator(project_dir=lowercase_ , project_config=lowercase_ )
lowercase_ = accelerator.prepare(lowercase_ )
            # Save 11 states; with total_limit=2 only the two newest checkpoints should remain:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(lowercase_ , """checkpoints""" , """checkpoint_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(lowercase_ , """checkpoints""" , """checkpoint_9""" ) ) )
self.assertTrue(os.path.exists(os.path.join(lowercase_ , """checkpoints""" , """checkpoint_10""" ) ) )
@require_cuda
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase_ = ["""torchrun""", F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
execute_subprocess_async(lowercase_ , env=os.environ.copy() )
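# --- Illustrative sketch (added for clarity; not part of the original test file) ---
# The total_limit tests above depend on checkpoint rotation: once more than
# `total_limit` checkpoints exist, the oldest are deleted. A minimal standalone
# sketch of that policy, reusing the os/shutil imports from the top of this
# file and assuming checkpoints are directories named checkpoint_<n>;
# `rotate_checkpoints` is a hypothetical helper, not the Accelerate API.
def rotate_checkpoints(folder: str, total_limit: int) -> None:
    ckpts = sorted(
        (d for d in os.listdir(folder) if d.startswith("checkpoint_")),
        key=lambda name: int(name.split("_")[-1]),
    )
    for stale in ckpts[:-total_limit]:  # keep only the `total_limit` newest
        shutil.rmtree(os.path.join(folder, stale))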
if __name__ == "__main__":
a = "/tmp/accelerate/state_checkpointing"
a = DummyModel()
a = torch.optim.Adam(params=model.parameters(), lr=1E-3)
a = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
a = dummy_dataloaders()
a = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
a = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
a = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
a = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
a = group["params"][0].device
break
assert param_device.type == accelerator.device.type
a = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
for group in optimizer.param_groups:
a = group["params"][0].device
break
assert (
param_device.type == torch.device('cpu').type
), F"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
for group in optimizer.param_groups:
a = group["params"][0].device
break
assert (
param_device.type == accelerator.device.type
), F"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
| 708
|
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
a = False
a = logging.get_logger(__name__)
a = 'ybelkada/fonts'
def UpperCAmelCase_ ( ):
if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
raise ImportError(
F'''You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use '''
"""Pix2StructImageProcessor. Please upgrade torch.""" )
def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
requires_backends(UpperCAmelCase__ , ["""torch"""] )
_check_torch_version()
lowercase_ = image_tensor.unsqueeze(0 )
lowercase_ = torch.nn.functional.unfold(UpperCAmelCase__ , (patch_height, patch_width) , stride=(patch_height, patch_width) )
lowercase_ = patches.reshape(image_tensor.size(0 ) , image_tensor.size(1 ) , UpperCAmelCase__ , UpperCAmelCase__ , -1 )
lowercase_ = patches.permute(0 , 4 , 2 , 3 , 1 ).reshape(
image_tensor.size(2 ) // patch_height , image_tensor.size(3 ) // patch_width , image_tensor.size(1 ) * patch_height * patch_width , )
return patches.unsqueeze(0 )
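# --- Illustrative sketch (added for clarity; not part of the original file) ---
# What the unfold-based helper above computes, checked on a tiny tensor.
# A minimal shape demonstration assuming torch is installed; the sizes are
# arbitrary.
import torch

_image = torch.arange(1 * 3 * 4 * 6, dtype=torch.float32).reshape(1, 3, 4, 6)
_patches = torch.nn.functional.unfold(_image, (2, 3), stride=(2, 3))
# unfold flattens each 2x3 patch across channels (3 * 2 * 3 = 18 values per
# patch); a 4x6 image tiles into (4 // 2) * (6 // 3) = 4 non-overlapping patches.
assert _patches.shape == (1, 18, 4)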
def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ = 3_6 , UpperCAmelCase__ = "black" , UpperCAmelCase__ = "white" , UpperCAmelCase__ = 5 , UpperCAmelCase__ = 5 , UpperCAmelCase__ = 5 , UpperCAmelCase__ = 5 , UpperCAmelCase__ = None , UpperCAmelCase__ = None , ):
requires_backends(UpperCAmelCase__ , """vision""" )
# Add new lines so that each line is no more than 80 characters.
lowercase_ = textwrap.TextWrapper(width=8_0 )
lowercase_ = wrapper.wrap(text=UpperCAmelCase__ )
lowercase_ = """\n""".join(UpperCAmelCase__ )
if font_bytes is not None and font_path is None:
lowercase_ = io.BytesIO(UpperCAmelCase__ )
elif font_path is not None:
lowercase_ = font_path
else:
lowercase_ = hf_hub_download(UpperCAmelCase__ , """Arial.TTF""" )
lowercase_ = ImageFont.truetype(UpperCAmelCase__ , encoding="""UTF-8""" , size=UpperCAmelCase__ )
# Use a temporary canvas to determine the width and height in pixels when
# rendering the text.
lowercase_ = ImageDraw.Draw(Image.new("""RGB""" , (1, 1) , UpperCAmelCase__ ) )
lowercase_ , lowercase_ , lowercase_ , lowercase_ = temp_draw.textbbox((0, 0) , UpperCAmelCase__ , UpperCAmelCase__ )
# Create the actual image with a bit of padding around the text.
lowercase_ = text_width + left_padding + right_padding
lowercase_ = text_height + top_padding + bottom_padding
lowercase_ = Image.new("""RGB""" , (image_width, image_height) , UpperCAmelCase__ )
lowercase_ = ImageDraw.Draw(UpperCAmelCase__ )
draw.text(xy=(left_padding, top_padding) , text=UpperCAmelCase__ , fill=UpperCAmelCase__ , font=UpperCAmelCase__ )
return image
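# --- Illustrative usage sketch (added for clarity; not part of the original file) ---
# Rendering a header the way the helper above does, but with PIL's built-in
# bitmap font so no hub download is needed. A minimal sketch assuming
# Pillow >= 8.0 (for `ImageDraw.textbbox`); the padding values are arbitrary.
from PIL import Image, ImageDraw, ImageFont

font = ImageFont.load_default()
probe = ImageDraw.Draw(Image.new("RGB", (1, 1), "white"))
left, top, right, bottom = probe.textbbox((0, 0), "What is shown?", font=font)
canvas = Image.new("RGB", (right - left + 10, bottom - top + 10), "white")
ImageDraw.Draw(canvas).text((5, 5), "What is shown?", fill="black", font=font)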
def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ):
requires_backends(UpperCAmelCase__ , """vision""" )
# Convert to PIL image if necessary
lowercase_ = to_pil_image(UpperCAmelCase__ )
lowercase_ = render_text(UpperCAmelCase__ , **UpperCAmelCase__ )
lowercase_ = max(header_image.width , image.width )
lowercase_ = int(image.height * (new_width / image.width) )
lowercase_ = int(header_image.height * (new_width / header_image.width) )
lowercase_ = Image.new("""RGB""" , (new_width, new_height + new_header_height) , """white""" )
new_image.paste(header_image.resize((new_width, new_header_height) ) , (0, 0) )
new_image.paste(image.resize((new_width, new_height) ) , (0, new_header_height) )
# Convert back to the original framework if necessary
lowercase_ = to_numpy_array(UpperCAmelCase__ )
if infer_channel_dimension_format(UpperCAmelCase__ ) == ChannelDimension.LAST:
lowercase_ = to_channel_dimension_format(UpperCAmelCase__ , ChannelDimension.LAST )
return new_image
class UpperCamelCase__ ( __magic_name__ ):
__SCREAMING_SNAKE_CASE : Tuple = ['flattened_patches']
def __init__( self : str , UpperCamelCase__ : bool = True , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : int = 2_048 , UpperCamelCase__ : bool = False , **UpperCamelCase__ : Optional[int] , ):
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
lowercase_ = patch_size if patch_size is not None else {"""height""": 16, """width""": 16}
lowercase_ = do_normalize
lowercase_ = do_convert_rgb
lowercase_ = max_patches
lowercase_ = is_vqa
def UpperCAmelCase__ ( self : Optional[Any] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : int , UpperCamelCase__ : dict , **UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
requires_backends(self.extract_flattened_patches , """torch""" )
_check_torch_version()
# convert to torch
lowercase_ = to_channel_dimension_format(UpperCamelCase__ , ChannelDimension.FIRST )
lowercase_ = torch.from_numpy(UpperCamelCase__ )
lowercase_ , lowercase_ = patch_size["""height"""], patch_size["""width"""]
lowercase_ , lowercase_ = get_image_size(UpperCamelCase__ )
        # maximize the scale such that the resized image yields at most max_patches patches
lowercase_ = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) )
lowercase_ = max(min(math.floor(scale * image_height / patch_height ) , UpperCamelCase__ ) , 1 )
lowercase_ = max(min(math.floor(scale * image_width / patch_width ) , UpperCamelCase__ ) , 1 )
lowercase_ = max(num_feasible_rows * patch_height , 1 )
lowercase_ = max(num_feasible_cols * patch_width , 1 )
lowercase_ = torch.nn.functional.interpolate(
image.unsqueeze(0 ) , size=(resized_height, resized_width) , mode="""bilinear""" , align_corners=UpperCamelCase__ , antialias=UpperCamelCase__ , ).squeeze(0 )
# [1, rows, columns, patch_height * patch_width * image_channels]
lowercase_ = torch_extract_patches(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
lowercase_ = patches.shape
lowercase_ = patches_shape[1]
lowercase_ = patches_shape[2]
lowercase_ = patches_shape[3]
# [rows * columns, patch_height * patch_width * image_channels]
lowercase_ = patches.reshape([rows * columns, depth] )
# [rows * columns, 1]
lowercase_ = torch.arange(UpperCamelCase__ ).reshape([rows, 1] ).repeat(1 , UpperCamelCase__ ).reshape([rows * columns, 1] )
lowercase_ = torch.arange(UpperCamelCase__ ).reshape([1, columns] ).repeat(UpperCamelCase__ , 1 ).reshape([rows * columns, 1] )
# Offset by 1 so the ids do not contain zeros, which represent padding.
row_ids += 1
col_ids += 1
# Prepare additional patch features.
# [rows * columns, 1]
lowercase_ = row_ids.to(torch.floataa )
lowercase_ = col_ids.to(torch.floataa )
# [rows * columns, 2 + patch_height * patch_width * image_channels]
lowercase_ = torch.cat([row_ids, col_ids, patches] , -1 )
# [max_patches, 2 + patch_height * patch_width * image_channels]
lowercase_ = torch.nn.functional.pad(UpperCamelCase__ , [0, 0, 0, max_patches - (rows * columns)] ).float()
lowercase_ = to_numpy_array(UpperCamelCase__ )
return result
def UpperCAmelCase__ ( self : List[Any] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Dict ):
'''simple docstring'''
if image.dtype == np.uinta:
lowercase_ = image.astype(np.floataa )
# take mean across the whole `image`
lowercase_ = np.mean(UpperCamelCase__ )
lowercase_ = np.std(UpperCamelCase__ )
lowercase_ = max(UpperCamelCase__ , 1.0 / math.sqrt(np.prod(image.shape ) ) )
return normalize(UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ , **UpperCamelCase__ )
def UpperCAmelCase__ ( self : str , UpperCamelCase__ : ImageInput , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[Dict[str, int]] = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : ChannelDimension = ChannelDimension.FIRST , **UpperCamelCase__ : Union[str, Any] , ):
'''simple docstring'''
lowercase_ = do_normalize if do_normalize is not None else self.do_normalize
lowercase_ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
lowercase_ = patch_size if patch_size is not None else self.patch_size
lowercase_ = max_patches if max_patches is not None else self.max_patches
lowercase_ = self.is_vqa
if kwargs.get("""data_format""" , UpperCamelCase__ ) is not None:
raise ValueError("""data_format is not an accepted input as the outputs are """ )
lowercase_ = make_list_of_images(UpperCamelCase__ )
if not valid_images(UpperCamelCase__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
lowercase_ = [convert_to_rgb(UpperCamelCase__ ) for image in images]
# All transformations expect numpy arrays.
lowercase_ = [to_numpy_array(UpperCamelCase__ ) for image in images]
if is_vqa:
if header_text is None:
raise ValueError("""A header text must be provided for VQA models.""" )
lowercase_ = kwargs.pop("""font_bytes""" , UpperCamelCase__ )
lowercase_ = kwargs.pop("""font_path""" , UpperCamelCase__ )
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
lowercase_ = [header_text] * len(UpperCamelCase__ )
lowercase_ = [
render_header(UpperCamelCase__ , header_text[i] , font_bytes=UpperCamelCase__ , font_path=UpperCamelCase__ )
for i, image in enumerate(UpperCamelCase__ )
]
if do_normalize:
lowercase_ = [self.normalize(image=UpperCamelCase__ ) for image in images]
# convert to torch tensor and permute
lowercase_ = [
self.extract_flattened_patches(image=UpperCamelCase__ , max_patches=UpperCamelCase__ , patch_size=UpperCamelCase__ )
for image in images
]
# create attention mask in numpy
lowercase_ = [(image.sum(axis=-1 ) != 0).astype(np.floataa ) for image in images]
lowercase_ = BatchFeature(
data={"""flattened_patches""": images, """attention_mask""": attention_masks} , tensor_type=UpperCamelCase__ )
return encoded_outputs
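# --- Illustrative sketch (added for clarity; not part of the original file) ---
# Two details of the processor above, re-derived standalone. First, the
# row/column index bookkeeping for a tiny 2x3 patch grid: ids are offset by 1
# so 0 can be reserved for padding. Second, the normalization floor: the
# standard deviation is clamped to 1/sqrt(N) so near-constant images do not
# blow up. Both snippets assume only numpy/torch and arbitrary toy sizes.
import math
import numpy as np
import torch

rows, cols = 2, 3
row_ids = torch.arange(rows).reshape(rows, 1).repeat(1, cols).reshape(-1, 1) + 1
col_ids = torch.arange(cols).reshape(1, cols).repeat(rows, 1).reshape(-1, 1) + 1
assert row_ids.squeeze().tolist() == [1, 1, 1, 2, 2, 2]
assert col_ids.squeeze().tolist() == [1, 2, 3, 1, 2, 3]

flat_img = np.full((4, 4), 0.5, dtype=np.float32)  # constant image, std == 0
adjusted_std = max(flat_img.std(), 1.0 / math.sqrt(flat_img.size))
normalized = (flat_img - flat_img.mean()) / adjusted_std  # finite thanks to the floor
assert np.isfinite(normalized).all()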
| 650
| 0
|
import os
import time
import numpy as np
import onnxruntime as ort
a = '''1'''
a = '''0'''
a = '''1'''
a = ort.SessionOptions()
a = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print('Create inference session...')
a = ['''TensorrtExecutionProvider''', '''CUDAExecutionProvider''']
a = ort.InferenceSession('model.onnx', sess_options=sess_opt, providers=execution_provider)
a = ort.RunOptions()
a = 1_2_8
a = 1
a = np.ones((batch, sequence), dtype=np.intaa)
a = np.ones((batch, sequence), dtype=np.intaa)
a = np.ones((batch, sequence), dtype=np.intaa)
print('Warm up phase...')
sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print('Start inference...')
a = time.time()
a = 2_0_0_0
a = {}
for _ in range(max_iters):
a = sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print('Average Inference Time = {:.3f} ms'.format((time.time() - start_time) * 1_0_0_0 / max_iters))
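# --- Illustrative sketch (added for clarity; not part of the original script) ---
# The warm-up + averaged-loop pattern used above, as a reusable helper.
# `time.perf_counter` is preferred over `time.time` for latency measurement;
# the helper is generic, not ONNX-Runtime specific, and is only a sketch.
def average_latency_ms(fn, iters=100, warmup=10):
    for _ in range(warmup):  # warm-up runs are excluded from the average
        fn()
    start = time.perf_counter()
    for _ in range(iters):
        fn()
    return (time.perf_counter() - start) * 1_000 / iters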
| 709
|
import cva
import numpy as np
class UpperCamelCase__ :
def __init__( self : List[str] , UpperCamelCase__ : float , UpperCamelCase__ : int ):
'''simple docstring'''
if k in (0.04, 0.06):
lowercase_ = k
lowercase_ = window_size
else:
raise ValueError("""invalid k value""" )
def __str__( self : Optional[int] ):
'''simple docstring'''
return str(self.k )
def UpperCAmelCase__ ( self : Optional[Any] , UpperCamelCase__ : str ):
'''simple docstring'''
lowercase_ = cva.imread(UpperCamelCase__ , 0 )
lowercase_ , lowercase_ = img.shape
lowercase_ = []
lowercase_ = img.copy()
lowercase_ = cva.cvtColor(UpperCamelCase__ , cva.COLOR_GRAY2RGB )
lowercase_ , lowercase_ = np.gradient(UpperCamelCase__ )
lowercase_ = dx**2
lowercase_ = dy**2
lowercase_ = dx * dy
        lowercase_ = self.k  # use the k validated in the constructor rather than a hardcoded 0.04
lowercase_ = self.window_size // 2
for y in range(UpperCamelCase__ , h - offset ):
for x in range(UpperCamelCase__ , w - offset ):
lowercase_ = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowercase_ = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowercase_ = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowercase_ = (wxx * wyy) - (wxy**2)
lowercase_ = wxx + wyy
lowercase_ = det - k * (trace**2)
                # Threshold on the corner response; the 0.5 cutoff can be tuned
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) , 0 )
color_img.itemset((y, x, 1) , 0 )
color_img.itemset((y, x, 2) , 255 )
return color_img, corner_list
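# --- Illustrative sketch (added for clarity; not part of the original file) ---
# The core Harris response R = det(M) - k * trace(M)^2 from the loop above,
# evaluated standalone for one window with made-up gradient sums. Positive R
# indicates a corner, negative R an edge, near-zero R a flat region.
wxx_demo, wyy_demo, wxy_demo = 4.0, 3.0, 1.0  # summed gradient products in a window
k_demo = 0.04
r_demo = (wxx_demo * wyy_demo - wxy_demo**2) - k_demo * (wxx_demo + wyy_demo) ** 2
assert r_demo > 0  # 11 - 0.04 * 49 = 9.04 -> corner-like response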
if __name__ == "__main__":
a = HarrisCorner(0.04, 3)
a , a = edge_detect.detect('path_to_image')
cva.imwrite('detect.png', color_img)
| 650
| 0
|
import numpy as np
def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ ):
return np.where(vector > 0 , UpperCAmelCase__ , (alpha * (np.exp(UpperCAmelCase__ ) - 1)) )
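# --- Illustrative usage sketch (added for clarity; not part of the original file) ---
# The function above is the ELU activation: identity for positive inputs and
# alpha * (exp(x) - 1) otherwise. A minimal check with numpy only:
demo_vector = np.array([2.0, 0.0, -1.0])
demo_alpha = 1.0
demo_elu = np.where(demo_vector > 0, demo_vector, demo_alpha * (np.exp(demo_vector) - 1))
assert np.allclose(demo_elu, [2.0, 0.0, np.exp(-1.0) - 1])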
if __name__ == "__main__":
import doctest
doctest.testmod()
| 710
|
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('9.1.0'):
a = {
'linear': PIL.Image.Resampling.BILINEAR,
'bilinear': PIL.Image.Resampling.BILINEAR,
'bicubic': PIL.Image.Resampling.BICUBIC,
'lanczos': PIL.Image.Resampling.LANCZOS,
'nearest': PIL.Image.Resampling.NEAREST,
}
else:
a = {
'linear': PIL.Image.LINEAR,
'bilinear': PIL.Image.BILINEAR,
'bicubic': PIL.Image.BICUBIC,
'lanczos': PIL.Image.LANCZOS,
'nearest': PIL.Image.NEAREST,
}
def UpperCAmelCase_ ( UpperCAmelCase__ ):
lowercase_ = (images / 2 + 0.5).clamp(0 , 1 )
lowercase_ = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
lowercase_ = numpy_to_pil(UpperCAmelCase__ )
return images
def UpperCAmelCase_ ( UpperCAmelCase__ ):
if images.ndim == 3:
lowercase_ = images[None, ...]
lowercase_ = (images * 2_5_5).round().astype("""uint8""" )
if images.shape[-1] == 1:
# special case for grayscale (single channel) images
lowercase_ = [Image.fromarray(image.squeeze() , mode="""L""" ) for image in images]
else:
lowercase_ = [Image.fromarray(UpperCAmelCase__ ) for image in images]
return pil_images
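# --- Illustrative sketch (added for clarity; not part of the original file) ---
# The two helpers above in sequence: map [-1, 1] model outputs back to [0, 1],
# move channels last, and convert to PIL. A tiny standalone run assuming torch
# and Pillow are installed:
import torch

demo = torch.rand(1, 3, 8, 8) * 2 - 1                    # fake model output in [-1, 1]
demo = (demo / 2 + 0.5).clamp(0, 1)                      # back to [0, 1]
demo_np = demo.cpu().permute(0, 2, 3, 1).float().numpy()
demo_np = (demo_np * 255).round().astype("uint8")
demo_pil = [Image.fromarray(arr) for arr in demo_np]
assert demo_pil[0].size == (8, 8)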
| 650
| 0
|
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
a = 'hf-internal-testing/tiny-random-bert'
a = os.path.join(TRANSFORMERS_CACHE, 'models--hf-internal-testing--tiny-random-bert')
a = '9b8c223d42b2188cb49d29af482996f9d0f3e5a6'
class UpperCamelCase__ ( unittest.TestCase ):
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase_ = cached_file(UpperCAmelCase_ , UpperCAmelCase_ )
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(UpperCAmelCase_ ) )
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(UpperCAmelCase_ , UpperCAmelCase_ ) ) )
with open(os.path.join(UpperCAmelCase_ , """refs""" , """main""" ) ) as f:
lowercase_ = f.read()
self.assertEqual(UpperCAmelCase_ , os.path.join(UpperCAmelCase_ , """snapshots""" , UpperCAmelCase_ , UpperCAmelCase_ ) )
self.assertTrue(os.path.isfile(UpperCAmelCase_ ) )
# File is cached at the same place the second time.
lowercase_ = cached_file(UpperCAmelCase_ , UpperCAmelCase_ )
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
# Using a specific revision to test the full commit hash.
lowercase_ = cached_file(UpperCAmelCase_ , UpperCAmelCase_ , revision="""9b8c223""" )
self.assertEqual(UpperCAmelCase_ , os.path.join(UpperCAmelCase_ , """snapshots""" , UpperCAmelCase_ , UpperCAmelCase_ ) )
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
with self.assertRaisesRegex(UpperCAmelCase_ , """is not a valid model identifier""" ):
lowercase_ = cached_file("""tiny-random-bert""" , UpperCAmelCase_ )
with self.assertRaisesRegex(UpperCAmelCase_ , """is not a valid git identifier""" ):
lowercase_ = cached_file(UpperCAmelCase_ , UpperCAmelCase_ , revision="""aaaa""" )
with self.assertRaisesRegex(UpperCAmelCase_ , """does not appear to have a file named""" ):
lowercase_ = cached_file(UpperCAmelCase_ , """conf""" )
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
with self.assertRaisesRegex(UpperCAmelCase_ , """does not appear to have a file named""" ):
lowercase_ = cached_file(UpperCAmelCase_ , """conf""" )
with open(os.path.join(UpperCAmelCase_ , """refs""" , """main""" ) ) as f:
lowercase_ = f.read()
self.assertTrue(os.path.isfile(os.path.join(UpperCAmelCase_ , """.no_exist""" , UpperCAmelCase_ , """conf""" ) ) )
lowercase_ = cached_file(UpperCAmelCase_ , """conf""" , _raise_exceptions_for_missing_entries=UpperCAmelCase_ )
self.assertIsNone(UpperCAmelCase_ )
lowercase_ = cached_file(UpperCAmelCase_ , """conf""" , local_files_only=UpperCAmelCase_ , _raise_exceptions_for_missing_entries=UpperCAmelCase_ )
self.assertIsNone(UpperCAmelCase_ )
lowercase_ = mock.Mock()
lowercase_ = 500
lowercase_ = {}
lowercase_ = HTTPError
lowercase_ = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("""requests.Session.request""" , return_value=UpperCAmelCase_ ) as mock_head:
lowercase_ = cached_file(UpperCAmelCase_ , """conf""" , _raise_exceptions_for_connection_errors=UpperCAmelCase_ )
self.assertIsNone(UpperCAmelCase_ )
            # This check ensures we did call the fake head request
mock_head.assert_called()
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
self.assertTrue(has_file("""hf-internal-testing/tiny-bert-pt-only""" , UpperCAmelCase_ ) )
self.assertFalse(has_file("""hf-internal-testing/tiny-bert-pt-only""" , UpperCAmelCase_ ) )
self.assertFalse(has_file("""hf-internal-testing/tiny-bert-pt-only""" , UpperCAmelCase_ ) )
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
self.assertIsNone(get_file_from_repo("""bert-base-cased""" , """ahah.txt""" ) )
# The function raises if the repository does not exist.
with self.assertRaisesRegex(UpperCAmelCase_ , """is not a valid model identifier""" ):
get_file_from_repo("""bert-base-case""" , UpperCAmelCase_ )
# The function raises if the revision does not exist.
with self.assertRaisesRegex(UpperCAmelCase_ , """is not a valid git identifier""" ):
get_file_from_repo("""bert-base-cased""" , UpperCAmelCase_ , revision="""ahaha""" )
lowercase_ = get_file_from_repo("""bert-base-cased""" , UpperCAmelCase_ )
# The name is the cached name which is not very easy to test, so instead we load the content.
        with open(UpperCAmelCase_ , """r""" ) as f:
            lowercase_ = json.loads(f.read() )
self.assertEqual(config["""hidden_size"""] , 768 )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase_ = Path(UpperCAmelCase_ ) / """a.txt"""
filename.touch()
self.assertEqual(get_file_from_repo(UpperCAmelCase_ , """a.txt""" ) , str(UpperCAmelCase_ ) )
self.assertIsNone(get_file_from_repo(UpperCAmelCase_ , """b.txt""" ) )
| 711
|
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase__ ( __magic_name__ ):
__SCREAMING_SNAKE_CASE : str = (UnCLIPScheduler,)
def UpperCAmelCase__ ( self : int , **UpperCamelCase__ : int ):
'''simple docstring'''
lowercase_ = {
"""num_train_timesteps""": 1_000,
"""variance_type""": """fixed_small_log""",
"""clip_sample""": True,
"""clip_sample_range""": 1.0,
"""prediction_type""": """epsilon""",
}
config.update(**UpperCamelCase__ )
return config
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=UpperCamelCase__ )
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=UpperCamelCase__ )
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=UpperCamelCase__ )
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=UpperCamelCase__ )
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=UpperCamelCase__ )
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=UpperCamelCase__ , prev_timestep=UpperCamelCase__ )
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ = self.scheduler_classes[0]
lowercase_ = self.get_scheduler_config(variance_type="""fixed_small_log""" )
lowercase_ = scheduler_class(**UpperCamelCase__ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000e-10 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_549_625 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_994_987 ) ) < 1e-5
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
lowercase_ = self.scheduler_classes[0]
lowercase_ = self.get_scheduler_config(variance_type="""learned_range""" )
lowercase_ = scheduler_class(**UpperCamelCase__ )
lowercase_ = 0.5
assert scheduler._get_variance(1 , predicted_variance=UpperCamelCase__ ) - -10.1_712_790 < 1e-5
assert scheduler._get_variance(487 , predicted_variance=UpperCamelCase__ ) - -5.7_998_052 < 1e-5
assert scheduler._get_variance(999 , predicted_variance=UpperCamelCase__ ) - -0.0_010_011 < 1e-5
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase_ = self.scheduler_classes[0]
lowercase_ = self.get_scheduler_config()
lowercase_ = scheduler_class(**UpperCamelCase__ )
lowercase_ = scheduler.timesteps
lowercase_ = self.dummy_model()
lowercase_ = self.dummy_sample_deter
lowercase_ = torch.manual_seed(0 )
for i, t in enumerate(UpperCamelCase__ ):
# 1. predict noise residual
lowercase_ = model(UpperCamelCase__ , UpperCamelCase__ )
# 2. predict previous mean of sample x_t-1
lowercase_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , generator=UpperCamelCase__ ).prev_sample
lowercase_ = pred_prev_sample
lowercase_ = torch.sum(torch.abs(UpperCamelCase__ ) )
lowercase_ = torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_sum.item() - 252.2_682_495 ) < 1e-2
assert abs(result_mean.item() - 0.3_284_743 ) < 1e-3
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
lowercase_ = self.scheduler_classes[0]
lowercase_ = self.get_scheduler_config()
lowercase_ = scheduler_class(**UpperCamelCase__ )
scheduler.set_timesteps(25 )
lowercase_ = scheduler.timesteps
lowercase_ = self.dummy_model()
lowercase_ = self.dummy_sample_deter
lowercase_ = torch.manual_seed(0 )
for i, t in enumerate(UpperCamelCase__ ):
# 1. predict noise residual
lowercase_ = model(UpperCamelCase__ , UpperCamelCase__ )
if i + 1 == timesteps.shape[0]:
lowercase_ = None
else:
lowercase_ = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
lowercase_ = scheduler.step(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , prev_timestep=UpperCamelCase__ , generator=UpperCamelCase__ ).prev_sample
lowercase_ = pred_prev_sample
lowercase_ = torch.sum(torch.abs(UpperCamelCase__ ) )
lowercase_ = torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_sum.item() - 258.2_044_983 ) < 1e-2
assert abs(result_mean.item() - 0.3_362_038 ) < 1e-3
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
pass
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
pass
| 650
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class UpperCamelCase__ ( unittest.TestCase ):
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
lowercase_ = 1
lowercase_ = 3
lowercase_ = (32, 32)
lowercase_ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(UpperCamelCase__ )
return image
@property
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
return model
@property
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
return model
@property
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_006 , )
return RobertaSeriesModelWithTransformation(UpperCamelCase__ )
@property
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
def extract(*UpperCamelCase__ : str , **UpperCamelCase__ : str ):
class UpperCamelCase__ :
def __init__( self : Any ):
'''simple docstring'''
lowercase_ = torch.ones([0] )
def UpperCAmelCase__ ( self : int , UpperCamelCase__ : int ):
'''simple docstring'''
self.pixel_values.to(UpperCamelCase__ )
return self
return Out()
return extract
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase_ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase_ = self.dummy_cond_unet
lowercase_ = PNDMScheduler(skip_prk_steps=UpperCamelCase__ )
lowercase_ = self.dummy_vae
lowercase_ = self.dummy_text_encoder
lowercase_ = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" )
lowercase_ = 77
lowercase_ = self.dummy_image.to(UpperCamelCase__ )
lowercase_ = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
lowercase_ = AltDiffusionImgaImgPipeline(
unet=UpperCamelCase__ , scheduler=UpperCamelCase__ , vae=UpperCamelCase__ , text_encoder=UpperCamelCase__ , tokenizer=UpperCamelCase__ , safety_checker=UpperCamelCase__ , feature_extractor=self.dummy_extractor , )
lowercase_ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=UpperCamelCase__ )
lowercase_ = alt_pipe.to(UpperCamelCase__ )
alt_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowercase_ = '''A painting of a squirrel eating a burger'''
lowercase_ = torch.Generator(device=UpperCamelCase__ ).manual_seed(0 )
lowercase_ = alt_pipe(
[prompt] , generator=UpperCamelCase__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , image=UpperCamelCase__ , )
lowercase_ = output.images
lowercase_ = torch.Generator(device=UpperCamelCase__ ).manual_seed(0 )
lowercase_ = alt_pipe(
[prompt] , generator=UpperCamelCase__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , image=UpperCamelCase__ , return_dict=UpperCamelCase__ , )[0]
lowercase_ = image[0, -3:, -3:, -1]
lowercase_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowercase_ = np.array([0.4_427, 0.3_731, 0.4_249, 0.4_941, 0.4_546, 0.4_148, 0.4_193, 0.4_666, 0.4_499] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5e-3
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ = self.dummy_cond_unet
lowercase_ = PNDMScheduler(skip_prk_steps=UpperCamelCase__ )
lowercase_ = self.dummy_vae
lowercase_ = self.dummy_text_encoder
lowercase_ = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" )
lowercase_ = 77
lowercase_ = self.dummy_image.to(UpperCamelCase__ )
# put models in fp16
lowercase_ = unet.half()
lowercase_ = vae.half()
lowercase_ = bert.half()
# make sure here that pndm scheduler skips prk
lowercase_ = AltDiffusionImgaImgPipeline(
unet=UpperCamelCase__ , scheduler=UpperCamelCase__ , vae=UpperCamelCase__ , text_encoder=UpperCamelCase__ , tokenizer=UpperCamelCase__ , safety_checker=UpperCamelCase__ , feature_extractor=self.dummy_extractor , )
lowercase_ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=UpperCamelCase__ )
lowercase_ = alt_pipe.to(UpperCamelCase__ )
alt_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowercase_ = '''A painting of a squirrel eating a burger'''
lowercase_ = torch.manual_seed(0 )
lowercase_ = alt_pipe(
[prompt] , generator=UpperCamelCase__ , num_inference_steps=2 , output_type="""np""" , image=UpperCamelCase__ , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
lowercase_ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
# resize to resolution that is divisible by 8 but not 16 or 32
lowercase_ = init_image.resize((760, 504) )
lowercase_ = '''BAAI/AltDiffusion'''
lowercase_ = AltDiffusionImgaImgPipeline.from_pretrained(
UpperCamelCase__ , safety_checker=UpperCamelCase__ , )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
pipe.enable_attention_slicing()
lowercase_ = '''A fantasy landscape, trending on artstation'''
lowercase_ = torch.manual_seed(0 )
lowercase_ = pipe(
prompt=UpperCamelCase__ , image=UpperCamelCase__ , strength=0.75 , guidance_scale=7.5 , generator=UpperCamelCase__ , output_type="""np""" , )
lowercase_ = output.images[0]
lowercase_ = image[255:258, 383:386, -1]
assert image.shape == (504, 760, 3)
lowercase_ = np.array([0.9_358, 0.9_397, 0.9_599, 0.9_901, 1.0_000, 1.0_000, 0.9_882, 1.0_000, 1.0_000] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class UpperCamelCase__ ( unittest.TestCase ):
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
lowercase_ = init_image.resize((768, 512) )
lowercase_ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy""" )
lowercase_ = '''BAAI/AltDiffusion'''
lowercase_ = AltDiffusionImgaImgPipeline.from_pretrained(
UpperCamelCase__ , safety_checker=UpperCamelCase__ , )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
pipe.enable_attention_slicing()
lowercase_ = '''A fantasy landscape, trending on artstation'''
lowercase_ = torch.manual_seed(0 )
lowercase_ = pipe(
prompt=UpperCamelCase__ , image=UpperCamelCase__ , strength=0.75 , guidance_scale=7.5 , generator=UpperCamelCase__ , output_type="""np""" , )
lowercase_ = output.images[0]
assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so a loose elementwise tolerance is used here
assert np.abs(expected_image - image ).max() < 1e-2
| 712
|
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
a = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt')
a = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
a = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class UpperCamelCase__ :
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default='cifar10' , metadata={'help': 'Name of a dataset from the datasets package'} )
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default=__magic_name__ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default=__magic_name__ , metadata={'help': 'The column name of the images in the files. If not set, will try to use \'image\' or \'img\'.'} , )
__SCREAMING_SNAKE_CASE : Optional[str] = field(default=__magic_name__ , metadata={'help': 'A folder containing the training data.'} )
__SCREAMING_SNAKE_CASE : Optional[str] = field(default=__magic_name__ , metadata={'help': 'A folder containing the validation data.'} )
__SCREAMING_SNAKE_CASE : Optional[float] = field(
default=0.1_5 , metadata={'help': 'Percent to split off of train for validation.'} )
__SCREAMING_SNAKE_CASE : int = field(default=32 , metadata={'help': 'The size of the square patches to use for masking.'} )
__SCREAMING_SNAKE_CASE : float = field(
default=0.6 , metadata={'help': 'Percentage of patches to mask.'} , )
__SCREAMING_SNAKE_CASE : Optional[int] = field(
default=__magic_name__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
__SCREAMING_SNAKE_CASE : Optional[int] = field(
default=__magic_name__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
lowercase_ = {}
if self.train_dir is not None:
lowercase_ = self.train_dir
if self.validation_dir is not None:
lowercase_ = self.validation_dir
lowercase_ = data_files if data_files else None
@dataclass
class UpperCamelCase__ :
__SCREAMING_SNAKE_CASE : str = field(
default=__magic_name__ , metadata={
'help': (
'The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a '
'checkpoint identifier on the hub. '
'Don\'t set if you want to train a model from scratch.'
)
} , )
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default=__magic_name__ , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(__magic_name__ )} , )
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default=__magic_name__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default=__magic_name__ , metadata={
'help': (
'Override some existing default config settings when a model is trained from scratch. Example: '
'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'
)
} , )
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default=__magic_name__ , metadata={'help': 'Where do you want to store (cache) the pretrained models/datasets downloaded from the hub'} , )
__SCREAMING_SNAKE_CASE : str = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
__SCREAMING_SNAKE_CASE : str = field(default=__magic_name__ , metadata={'help': 'Name or path of preprocessor config.'} )
__SCREAMING_SNAKE_CASE : bool = field(
default=__magic_name__ , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
__SCREAMING_SNAKE_CASE : Optional[int] = field(
default=__magic_name__ , metadata={
'help': (
'The size (resolution) of each image. If not specified, will use `image_size` of the configuration.'
)
} , )
__SCREAMING_SNAKE_CASE : Optional[int] = field(
default=__magic_name__ , metadata={
'help': (
'The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration.'
)
} , )
__SCREAMING_SNAKE_CASE : Optional[int] = field(
default=__magic_name__ , metadata={'help': 'Stride to use for the encoder.'} , )
class UpperCamelCase__ :
def __init__( self : Dict , UpperCamelCase__ : List[Any]=192 , UpperCamelCase__ : Tuple=32 , UpperCamelCase__ : Dict=4 , UpperCamelCase__ : str=0.6 ):
'''simple docstring'''
lowercase_ = input_size
lowercase_ = mask_patch_size
lowercase_ = model_patch_size
lowercase_ = mask_ratio
if self.input_size % self.mask_patch_size != 0:
raise ValueError("""Input size must be divisible by mask patch size""" )
if self.mask_patch_size % self.model_patch_size != 0:
raise ValueError("""Mask patch size must be divisible by model patch size""" )
lowercase_ = self.input_size // self.mask_patch_size
lowercase_ = self.mask_patch_size // self.model_patch_size
lowercase_ = self.rand_size**2
lowercase_ = int(np.ceil(self.token_count * self.mask_ratio ) )
def __call__( self : int ):
'''simple docstring'''
lowercase_ = np.random.permutation(self.token_count )[: self.mask_count]
lowercase_ = np.zeros(self.token_count , dtype=UpperCamelCase__ )
lowercase_ = 1
lowercase_ = mask.reshape((self.rand_size, self.rand_size) )
lowercase_ = mask.repeat(self.scale , axis=0 ).repeat(self.scale , axis=1 )
return torch.tensor(mask.flatten() )
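# --- Illustrative sketch (added for clarity; not part of the original script) ---
# What the SimMIM mask generator above produces, re-derived standalone with
# the class defaults (input 192, mask patch 32, model patch 4, ratio 0.6):
# a boolean grid over model patches in which ~60% of the coarser mask patches
# are set to 1. Only the numpy import from the top of this file is needed.
demo_rand_size = 192 // 32               # 6x6 grid of mask patches
demo_scale = 32 // 4                     # each expands to 8x8 model patches
demo_count = int(np.ceil(demo_rand_size**2 * 0.6))
demo_idx = np.random.permutation(demo_rand_size**2)[:demo_count]
demo_mask = np.zeros(demo_rand_size**2, dtype=np.int64)
demo_mask[demo_idx] = 1
demo_mask = demo_mask.reshape(demo_rand_size, demo_rand_size)
demo_mask = demo_mask.repeat(demo_scale, axis=0).repeat(demo_scale, axis=1)
assert demo_mask.shape == (48, 48) and demo_mask.sum() == demo_count * demo_scale**2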
def UpperCAmelCase_ ( UpperCAmelCase__ ):
lowercase_ = torch.stack([example["""pixel_values"""] for example in examples] )
lowercase_ = torch.stack([example["""mask"""] for example in examples] )
return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def UpperCAmelCase_ ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowercase_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowercase_ , lowercase_ , lowercase_ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowercase_ , lowercase_ , lowercase_ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_mim""" , UpperCAmelCase__ , UpperCAmelCase__ )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowercase_ = training_args.get_process_log_level()
logger.setLevel(UpperCAmelCase__ )
transformers.utils.logging.set_verbosity(UpperCAmelCase__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + F''', distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
lowercase_ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowercase_ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Initialize our dataset.
lowercase_ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
lowercase_ = None if """validation""" in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , UpperCAmelCase__ ) and data_args.train_val_split > 0.0:
lowercase_ = ds["""train"""].train_test_split(data_args.train_val_split )
lowercase_ = split["""train"""]
lowercase_ = split["""test"""]
# Create config
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowercase_ = {
"""cache_dir""": model_args.cache_dir,
"""revision""": model_args.model_revision,
"""use_auth_token""": True if model_args.use_auth_token else None,
}
if model_args.config_name_or_path:
lowercase_ = AutoConfig.from_pretrained(model_args.config_name_or_path , **UpperCAmelCase__ )
elif model_args.model_name_or_path:
lowercase_ = AutoConfig.from_pretrained(model_args.model_name_or_path , **UpperCAmelCase__ )
else:
lowercase_ = CONFIG_MAPPING[model_args.model_type]()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.config_overrides is not None:
logger.info(F'''Overriding config: {model_args.config_overrides}''' )
config.update_from_string(model_args.config_overrides )
logger.info(F'''New config: {config}''' )
# make sure the decoder_type is "simmim" (only relevant for BEiT)
if hasattr(UpperCAmelCase__ , """decoder_type""" ):
lowercase_ = """simmim"""
# adapt config
lowercase_ = model_args.image_size if model_args.image_size is not None else config.image_size
lowercase_ = model_args.patch_size if model_args.patch_size is not None else config.patch_size
lowercase_ = (
model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
)
config.update(
{
"""image_size""": model_args.image_size,
"""patch_size""": model_args.patch_size,
"""encoder_stride""": model_args.encoder_stride,
} )
# create image processor
if model_args.image_processor_name:
lowercase_ = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **UpperCAmelCase__ )
elif model_args.model_name_or_path:
lowercase_ = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **UpperCAmelCase__ )
else:
lowercase_ = {
conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
}
lowercase_ = IMAGE_PROCESSOR_TYPES[model_args.model_type]()
# create model
if model_args.model_name_or_path:
lowercase_ = AutoModelForMaskedImageModeling.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=UpperCAmelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("""Training new model from scratch""" )
lowercase_ = AutoModelForMaskedImageModeling.from_config(UpperCAmelCase__ )
if training_args.do_train:
lowercase_ = ds["""train"""].column_names
else:
lowercase_ = ds["""validation"""].column_names
if data_args.image_column_name is not None:
lowercase_ = data_args.image_column_name
elif "image" in column_names:
lowercase_ = """image"""
elif "img" in column_names:
lowercase_ = """img"""
else:
lowercase_ = column_names[0]
# transformations as done in original SimMIM paper
# source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
lowercase_ = Compose(
[
Lambda(lambda UpperCAmelCase__ : img.convert("""RGB""" ) if img.mode != "RGB" else img ),
RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
# create mask generator
lowercase_ = MaskGenerator(
input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , )
def preprocess_images(UpperCAmelCase__ ):
lowercase_ = [transforms(UpperCAmelCase__ ) for image in examples[image_column_name]]
lowercase_ = [mask_generator() for i in range(len(examples[image_column_name] ) )]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError("""--do_train requires a train dataset""" )
        if data_args.max_train_samples is not None:
            ds["""train"""] = ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
        # Set the training transforms
        ds["train"].set_transform(preprocess_images )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError("""--do_eval requires a validation dataset""" )
        if data_args.max_eval_samples is not None:
            ds["""validation"""] = (
                ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images )
# Initialize our trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=ds["""train"""] if training_args.do_train else None , eval_dataset=ds["""validation"""] if training_args.do_eval else None , tokenizer=image_processor , data_collator=collate_fn , )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
trainer.save_model()
trainer.log_metrics("""train""" , train_result.metrics )
trainer.save_metrics("""train""" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("""eval""" , metrics )
        trainer.save_metrics("""eval""" , metrics )
# Write model card and (optionally) push to hub
    kwargs = {
        """finetuned_from""": model_args.model_name_or_path,
        """tasks""": """masked-image-modeling""",
        """dataset""": data_args.dataset_name,
        """tags""": ["""masked-image-modeling"""],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
if __name__ == "__main__":
main()
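# A minimal, hypothetical invocation of this training script (the file name,
# dataset and paths below are illustrative assumptions, not taken from the
# original):
#
#   python run_mim.py \
#       --model_type vit \
#       --dataset_name cifar10 \
#       --output_dir ./simmim-outputs \
#       --do_train \
#       --do_eval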
| 650
| 0
|
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester :
    def __init__( self : Dict , parent : List[str] , vocab_size : Any=99 , batch_size : Any=13 , d_model : Optional[int]=16 , decoder_seq_length : Optional[int]=7 , is_training : List[Any]=True , is_decoder : Optional[int]=True , use_attention_mask : int=True , use_cache : Optional[int]=False , use_labels : Any=True , decoder_start_token_id : Tuple=2 , decoder_ffn_dim : List[str]=32 , decoder_layers : int=4 , decoder_attention_heads : Tuple=4 , max_position_embeddings : List[str]=30 , pad_token_id : List[str]=0 , bos_token_id : str=1 , eos_token_id : str=2 , scope : Any=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.decoder_layers = decoder_layers
        self.num_hidden_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings
        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1
    def prepare_config_and_inputs( self : Tuple ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
        config = TrOCRConfig(
            vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
        return (config, input_ids, attention_mask, lm_labels)
    def create_and_check_decoder_model_past( self : Optional[int] , config : Tuple , input_ids : List[str] , attention_mask : str , lm_labels : List[Any] , ):
        '''simple docstring'''
        config.use_cache = True
        model = TrOCRDecoder(config=config ).to(torch_device ).eval()
        input_ids = input_ids[:2]
        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids , use_cache=True )
        outputs_use_cache_conf = model(input_ids )
        outputs_no_past = model(input_ids , use_cache=False )
        self.parent.assertTrue(len(outputs ) == len(outputs_use_cache_conf ) )
        self.parent.assertTrue(len(outputs ) == len(outputs_no_past ) + 1 )
        past_key_values = outputs["""past_key_values"""]
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
        output_from_no_past = model(next_input_ids )["""last_hidden_state"""]
        output_from_past = model(next_tokens , past_key_values=past_key_values )["""last_hidden_state"""]
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1e-3 )
    def prepare_config_and_inputs_for_common( self : Optional[int] ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , attention_mask , lm_labels = config_and_inputs
lowercase_ = {"""input_ids""": input_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase__ ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {'text-generation': TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False
    def setUp( self : Union[str, Any] ):
        '''simple docstring'''
        self.model_tester = TrOCRStandaloneDecoderModelTester(self , is_training=False )
        self.config_tester = ConfigTester(self , config_class=TrOCRConfig )
    def test_inputs_embeds( self : int ):
'''simple docstring'''
pass
    def test_save_load_fast_init_from_base( self : Dict ):
'''simple docstring'''
pass
    def test_save_load_fast_init_to_base( self : Any ):
'''simple docstring'''
pass
    def test_config( self : List[Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
    def test_decoder_model_past( self : Optional[int] ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs )
    def test_retain_grad_hidden_states_attentions( self : int ):
'''simple docstring'''
return
@unittest.skip("""The model doesn't support left padding""" ) # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility( self : Union[str, Any] ):
'''simple docstring'''
pass
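# Hedged note on what the suite above exercises: the standalone tester runs
# TrOCRDecoder twice -- once with use_cache=True and once without -- and then
# asserts that a cached single-token forward pass matches the uncached full
# forward pass on a random output slice. A typical (assumed) way to run just
# that check:
#
#   pytest test_modeling_trocr.py -k "decoder_model_past"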
| 713
|
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
a = logging.get_logger(__name__)
class UpperCamelCase__ ( BaseImageProcessor ):
__SCREAMING_SNAKE_CASE : List[Any] = ['pixel_values']
    def __init__( self : List[str] , do_resize : bool = True , size : Dict[str, int] = None , resample : PILImageResampling = PILImageResampling.BICUBIC , do_center_crop : bool = True , crop_size : Dict[str, int] = None , do_rescale : bool = True , rescale_factor : Union[int, float] = 1 / 255 , do_normalize : bool = True , image_mean : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , image_std : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **kwargs : Dict , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        size = size if size is not None else {"""shortest_edge""": 224}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
        crop_size = get_size_dict(crop_size , param_name="""crop_size""" )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize( self : int , image : np.ndarray , size : Dict[str, int] , resample : PILImageResampling = PILImageResampling.BICUBIC , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs : Optional[int] , ):
        '''simple docstring'''
        size_dict = get_size_dict(size , default_to_square=False )
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size["""shortest_edge"""] )
            output_size = get_resize_output_image_size(image , size=shortest_edge , default_to_square=False )
            size_dict = {"""height""": output_size[0], """width""": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                F'''Size dict must have keys \'height\' and \'width\' or \'shortest_edge\'. Got {size_dict.keys()}''' )
        return resize(
            image , size=(size_dict["""height"""], size_dict["""width"""]) , resample=resample , data_format=data_format , **kwargs )
    def center_crop( self : str , image : np.ndarray , size : Dict[str, int] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs : str , ):
        '''simple docstring'''
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F'''Size dict must have keys \'height\' and \'width\'. Got {size.keys()}''' )
        return center_crop(image , size=(size["""height"""], size["""width"""]) , data_format=data_format , **kwargs )
    def rescale( self : Optional[int] , image : np.ndarray , scale : Union[int, float] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs : str , ):
        '''simple docstring'''
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self : List[str] , image : np.ndarray , mean : Union[float, List[float]] , std : Union[float, List[float]] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs : Optional[int] , ):
        '''simple docstring'''
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self : Dict , images : ImageInput , do_resize : Optional[bool] = None , size : Optional[Dict[str, int]] = None , resample : PILImageResampling = None , do_center_crop : Optional[bool] = None , crop_size : Optional[Dict[str, int]] = None , do_rescale : Optional[bool] = None , rescale_factor : Optional[float] = None , do_normalize : Optional[bool] = None , image_mean : Optional[Union[float, Iterable[float]]] = None , image_std : Optional[Union[float, Iterable[float]]] = None , return_tensors : Optional[TensorType] = None , data_format : ChannelDimension = ChannelDimension.FIRST , **kwargs : Optional[int] , ):
        '''simple docstring'''
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name="""crop_size""" )
        images = make_list_of_images(images )
        if not valid_images(images ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image , size , resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image , crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image , rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image , image_mean , image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"""pixel_values""": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
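# Hedged usage sketch for the image processor defined above (the input image
# is an illustrative assumption; BaseImageProcessor.__call__ dispatches to
# preprocess()):
#
#   from PIL import Image
#
#   processor_instance = UpperCamelCase__()       # defaults: shortest edge 224, center crop 224x224
#   batch = processor_instance(Image.open("cat.png"))
#   batch["pixel_values"][0].shape                # -> (3, 224, 224), channels-first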
| 650
| 0
|
'''simple docstring'''
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS = None
_NATIVE_BYTEORDER = '<' if sys.byteorder == 'little' else '>'
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
np.dtype('|b1'),
np.dtype('|u1'),
np.dtype('<u2'),
np.dtype('>u2'),
np.dtype('<i2'),
np.dtype('>i2'),
np.dtype('<u4'),
np.dtype('>u4'),
np.dtype('<i4'),
np.dtype('>i4'),
np.dtype('<f4'),
np.dtype('>f4'),
np.dtype('<f8'),
np.dtype('>f8'),
]
@dataclass
class UpperCamelCase__ :
__SCREAMING_SNAKE_CASE : bool = True
__SCREAMING_SNAKE_CASE : Optional[str] = None
# Automatically constructed
__SCREAMING_SNAKE_CASE : ClassVar[str] = "PIL.Image.Image"
__SCREAMING_SNAKE_CASE : ClassVar[Any] = pa.struct({'bytes': pa.binary(), 'path': pa.string()} )
    __SCREAMING_SNAKE_CASE : str = field(default='Image' , init=False , repr=False )
def __call__( self : List[str] ):
'''simple docstring'''
return self.pa_type
    def encode_example( self : List[str] , value : Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"] ):
        '''simple docstring'''
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("""To support encoding images, please install 'Pillow'.""" )
        if isinstance(value , list ):
            value = np.array(value )
        if isinstance(value , str ):
            return {"path": value, "bytes": None}
        elif isinstance(value , bytes ):
            return {"path": None, "bytes": value}
        elif isinstance(value , np.ndarray ):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value )
        elif isinstance(value , PIL.Image.Image ):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value )
elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get("""path""" )}
elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )}
else:
raise ValueError(
F'''An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
    def decode_example( self : Optional[Any] , value : dict , token_per_repo_id : Tuple=None ):
'''simple docstring'''
if not self.decode:
raise RuntimeError("""Decoding is disabled for this feature. Please use Image(decode=True) instead.""" )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support decoding images, please install 'Pillow'.""" )
        if token_per_repo_id is None:
            token_per_repo_id = {}
        path , bytes_ = value["""path"""], value["""bytes"""]
        if bytes_ is None:
            if path is None:
                raise ValueError(F'''An image should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
            else:
                if is_local_path(path ):
                    image = PIL.Image.open(path )
                else:
                    source_url = path.split("""::""" )[-1]
                    try:
                        repo_id = string_to_dict(source_url , config.HUB_DATASETS_URL )["""repo_id"""]
                        use_auth_token = token_per_repo_id.get(repo_id )
                    except ValueError:
                        use_auth_token = None
                    with xopen(path , """rb""" , use_auth_token=use_auth_token ) as f:
                        bytes_ = BytesIO(f.read() )
                    image = PIL.Image.open(bytes_ )
        else:
            image = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
from .features import Value
return (
self
if self.decode
else {
"bytes": Value("""binary""" ),
"path": Value("""string""" ),
}
)
    def cast_storage( self : Optional[int] , storage : Union[pa.StringArray, pa.StructArray, pa.ListArray] ):
        '''simple docstring'''
        if pa.types.is_string(storage.type ):
            bytes_array = pa.array([None] * len(storage ) , type=pa.binary() )
            storage = pa.StructArray.from_arrays([bytes_array, storage] , ["""bytes""", """path"""] , mask=storage.is_null() )
        elif pa.types.is_binary(storage.type ):
            path_array = pa.array([None] * len(storage ) , type=pa.string() )
            storage = pa.StructArray.from_arrays([storage, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
        elif pa.types.is_struct(storage.type ):
            if storage.type.get_field_index("""bytes""" ) >= 0:
                bytes_array = storage.field("""bytes""" )
            else:
                bytes_array = pa.array([None] * len(storage ) , type=pa.binary() )
            if storage.type.get_field_index("""path""" ) >= 0:
                path_array = storage.field("""path""" )
            else:
                path_array = pa.array([None] * len(storage ) , type=pa.string() )
            storage = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
        elif pa.types.is_list(storage.type ):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr ) )["""bytes"""] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , )
            path_array = pa.array([None] * len(storage ) , type=pa.string() )
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
        return array_cast(storage , self.pa_type )
    def embed_storage( self : List[str] , storage : pa.StructArray ):
        '''simple docstring'''
        @no_op_if_value_is_null
        def path_to_bytes(path : List[str] ):
            with xopen(path , """rb""" ) as f:
                bytes_ = f.read()
            return bytes_
        bytes_array = pa.array(
            [
                (path_to_bytes(x["""path"""] ) if x["""bytes"""] is None else x["""bytes"""]) if x is not None else None
                for x in storage.to_pylist()
            ] , type=pa.binary() , )
        path_array = pa.array(
            [os.path.basename(path ) if path is not None else None for path in storage.field("""path""" ).to_pylist()] , type=pa.string() , )
        storage = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
        return array_cast(storage , self.pa_type )
def list_image_compression_formats( ):
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
return _IMAGE_COMPRESSION_FORMATS
def image_to_bytes(image ):
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = """PNG""" if image.mode in ["""1""", """L""", """LA""", """RGB""", """RGBA"""] else """TIFF"""
    image.save(buffer , format=format )
    return buffer.getvalue()
def encode_pil_image(image ):
    if hasattr(image , """filename""" ) and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image )}
def encode_np_array(array ):
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("""To support encoding images, please install 'Pillow'.""" )
    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != """=""" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize
    dest_dtype = None
    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("""|u1""" )
if dtype_kind not in ["u", "i"]:
raise TypeError(
F'''Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.''' )
if dtype is not dest_dtype:
warnings.warn(F'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
            dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize )
            dest_dtype = np.dtype(dtype_str )
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(F'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
F'''Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}''' )
    image = PIL.Image.fromarray(array.astype(dest_dtype ) )
    return {"path": None, "bytes": image_to_bytes(image )}
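# Illustrative walk-through of the downcasting loop above (an example, not an
# exhaustive rule): a big-endian ">i8" array is not in
# _VALID_IMAGE_ARRAY_DTPYES, so the loop halves the itemsize from 8 to 4 and
# lands on ">i4", which is valid; the array is downcast with a warning before
# being handed to Pillow.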
def objects_to_list_of_image_dicts(objs ):
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("""To support encoding images, please install 'Pillow'.""" )
    if objs:
        _ , obj = first_non_null_value(objs )
        if isinstance(obj , str ):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj , np.ndarray ):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array )
            return [obj_to_image_dict_func(obj ) for obj in objs]
        elif isinstance(obj , PIL.Image.Image ):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image )
            return [obj_to_image_dict_func(obj ) for obj in objs]
else:
return objs
else:
return objs
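# Hedged round-trip sketch (the file name is an illustrative assumption; the
# dataclass above corresponds to the Image feature in the `datasets` library):
#
#   feature = UpperCamelCase__()                    # the Image feature defined above
#   encoded = feature.encode_example("cat.png")     # {"path": "cat.png", "bytes": None}
#   pil_image = feature.decode_example(encoded)     # lazily opened PIL.Image.Image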
| 714
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
    'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'],
    'processing_trocr': ['TrOCRProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_trocr'] = [
        'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TrOCRForCausalLM',
        'TrOCRPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
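# Hedged usage sketch: with the lazy module wired to _import_structure, the
# public names resolve on first attribute access. The checkpoint id below is
# a real TrOCR checkpoint, but using it here is an illustrative assumption:
#
#   from transformers import TrOCRConfig, TrOCRProcessor
#
#   config = TrOCRConfig()     # triggers the configuration_trocr import
#   processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-handwritten")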
| 650
| 0
|
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class UpperCamelCase__ ( ModelTesterMixin , UNetTesterMixin , unittest.TestCase ):
    model_class = AutoencoderKL
    main_input_name = """sample"""
    base_precision = 1e-2
@property
    def dummy_input( self : int ):
        '''simple docstring'''
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes ).to(torch_device )
return {"sample": image}
@property
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
return (3, 32, 32)
@property
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
return (3, 32, 32)
    def prepare_init_args_and_inputs_for_common( self : int ):
'''simple docstring'''
        init_dict = {
"""block_out_channels""": [32, 64],
"""in_channels""": 3,
"""out_channels""": 3,
"""down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],
"""up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],
"""latent_channels""": 4,
}
        inputs_dict = self.dummy_input
return init_dict, inputs_dict
    def test_forward_signature( self : Union[str, Any] ):
'''simple docstring'''
pass
    def test_training( self : Dict ):
'''simple docstring'''
pass
@unittest.skipIf(torch_device == """mps""" , """Gradient checkpointing skipped on MPS""" )
    def test_gradient_checkpointing( self : List[Any] ):
        '''simple docstring'''
        init_dict , inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict )
        model.to(torch_device )
        assert not model.is_gradient_checkpointing and model.training
        out = model(**inputs_dict ).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()
        labels = torch.randn_like(out )
        loss = (out - labels).mean()
        loss.backward()
        # re-instantiate the model now enabling gradient checkpointing
        model_a = self.model_class(**init_dict )
        # clone model
        model_a.load_state_dict(model.state_dict() )
        model_a.to(torch_device )
        model_a.enable_gradient_checkpointing()
        assert model_a.is_gradient_checkpointing and model_a.training
        out_a = model_a(**inputs_dict ).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_a.zero_grad()
        loss_a = (out_a - labels).mean()
        loss_a.backward()
        # compare the output and parameters gradients
        self.assertTrue((loss - loss_a).abs() < 1e-5 )
        named_params = dict(model.named_parameters() )
        named_params_a = dict(model_a.named_parameters() )
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5e-5 ) )
    def test_from_pretrained_hub( self : Union[str, Any] ):
        '''simple docstring'''
        model , loading_info = AutoencoderKL.from_pretrained("""fusing/autoencoder-kl-dummy""" , output_loading_info=True )
        self.assertIsNotNone(model )
        self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
        model.to(torch_device )
        image = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
    def test_output_pretrained( self : Any ):
        '''simple docstring'''
        model = AutoencoderKL.from_pretrained("""fusing/autoencoder-kl-dummy""" )
        model = model.to(torch_device )
        model.eval()
        if torch_device == "mps":
            generator = torch.manual_seed(0 )
        else:
            generator = torch.Generator(device=torch_device ).manual_seed(0 )
        image = torch.randn(
            1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
        image = image.to(torch_device )
        with torch.no_grad():
            output = model(image , sample_posterior=True , generator=generator ).sample
        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
            expected_output_slice = torch.tensor(
[
-4.0078e-01,
-3.8323e-04,
-1.2681e-01,
-1.1462e-01,
2.0095e-01,
1.0893e-01,
-8.8247e-02,
-3.0361e-01,
-9.8644e-03,
] )
elif torch_device == "cpu":
            expected_output_slice = torch.tensor(
[-0.1_352, 0.0_878, 0.0_419, -0.0_818, -0.1_069, 0.0_688, -0.1_458, -0.4_446, -0.0_026] )
else:
            expected_output_slice = torch.tensor(
[-0.2_421, 0.4_642, 0.2_507, -0.0_438, 0.0_682, 0.3_160, -0.2_018, -0.0_727, 0.2_485] )
        self.assertTrue(torch_all_close(output_slice , expected_output_slice , rtol=1e-2 ) )
@slow
class UpperCamelCase__ ( unittest.TestCase ):
    def get_file_format( self : Dict , seed : int , shape : str ):
        '''simple docstring'''
        return F'''gaussian_noise_s={seed}_shape={"_".join([str(s ) for s in shape] )}.npy'''
    def tearDown( self : Optional[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_sd_image( self : Optional[Any] , seed : Optional[Any]=0 , shape : List[str]=(4, 3, 512, 512) , fpaa : List[Any]=False ):
        '''simple docstring'''
        dtype = torch.float16 if fpaa else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed , shape ) ) ).to(torch_device ).to(dtype )
        return image
    def get_sd_vae_model( self : Optional[int] , model_id : Any="CompVis/stable-diffusion-v1-4" , fpaa : List[Any]=False ):
        '''simple docstring'''
        revision = """fp16""" if fpaa else None
        torch_dtype = torch.float16 if fpaa else torch.float32
        model = AutoencoderKL.from_pretrained(
            model_id , subfolder="""vae""" , torch_dtype=torch_dtype , revision=revision , )
        model.to(torch_device ).eval()
        return model
    def get_generator( self : str , seed : Optional[int]=0 ):
        '''simple docstring'''
        if torch_device == "mps":
            return torch.manual_seed(seed )
        return torch.Generator(device=torch_device ).manual_seed(seed )
@parameterized.expand(
[
# fmt: off
[33, [-0.1_603, 0.9_878, -0.0_495, -0.0_790, -0.2_709, 0.8_375, -0.2_060, -0.0_824], [-0.2_395, 0.0_098, 0.0_102, -0.0_709, -0.2_840, -0.0_274, -0.0_718, -0.1_824]],
[47, [-0.2_376, 0.1_168, 0.1_332, -0.4_840, -0.2_508, -0.0_791, -0.0_493, -0.4_089], [0.0_350, 0.0_847, 0.0_467, 0.0_344, -0.0_842, -0.0_547, -0.0_633, -0.1_131]],
# fmt: on
] )
def UpperCAmelCase__ ( self : int , UpperCamelCase__ : str , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Any] ):
'''simple docstring'''
lowercase_ = self.get_sd_vae_model()
lowercase_ = self.get_sd_image(__lowercase )
lowercase_ = self.get_generator(__lowercase )
with torch.no_grad():
lowercase_ = model(__lowercase , generator=__lowercase , sample_posterior=__lowercase ).sample
assert sample.shape == image.shape
lowercase_ = sample[-1, -2:, -2:, :2].flatten().float().cpu()
lowercase_ = torch.tensor(expected_slice_mps if torch_device == """mps""" else expected_slice )
assert torch_all_close(__lowercase , __lowercase , atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.0_513, 0.0_289, 1.3_799, 0.2_166, -0.2_573, -0.0_871, 0.5_103, -0.0_999]],
[47, [-0.4_128, -0.1_320, -0.3_704, 0.1_965, -0.4_116, -0.2_332, -0.3_340, 0.2_247]],
# fmt: on
] )
@require_torch_gpu
def UpperCAmelCase__ ( self : int , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple ):
'''simple docstring'''
lowercase_ = self.get_sd_vae_model(fpaa=__lowercase )
lowercase_ = self.get_sd_image(__lowercase , fpaa=__lowercase )
lowercase_ = self.get_generator(__lowercase )
with torch.no_grad():
lowercase_ = model(__lowercase , generator=__lowercase , sample_posterior=__lowercase ).sample
assert sample.shape == image.shape
lowercase_ = sample[-1, -2:, :2, -2:].flatten().float().cpu()
lowercase_ = torch.tensor(__lowercase )
assert torch_all_close(__lowercase , __lowercase , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.1_609, 0.9_866, -0.0_487, -0.0_777, -0.2_716, 0.8_368, -0.2_055, -0.0_814], [-0.2_395, 0.0_098, 0.0_102, -0.0_709, -0.2_840, -0.0_274, -0.0_718, -0.1_824]],
[47, [-0.2_377, 0.1_147, 0.1_333, -0.4_841, -0.2_506, -0.0_805, -0.0_491, -0.4_085], [0.0_350, 0.0_847, 0.0_467, 0.0_344, -0.0_842, -0.0_547, -0.0_633, -0.1_131]],
# fmt: on
] )
def UpperCAmelCase__ ( self : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[Any] ):
'''simple docstring'''
lowercase_ = self.get_sd_vae_model()
lowercase_ = self.get_sd_image(__lowercase )
with torch.no_grad():
lowercase_ = model(__lowercase ).sample
assert sample.shape == image.shape
lowercase_ = sample[-1, -2:, -2:, :2].flatten().float().cpu()
lowercase_ = torch.tensor(expected_slice_mps if torch_device == """mps""" else expected_slice )
assert torch_all_close(__lowercase , __lowercase , atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.2_051, -0.1_803, -0.2_311, -0.2_114, -0.3_292, -0.3_574, -0.2_953, -0.3_323]],
[37, [-0.2_632, -0.2_625, -0.2_199, -0.2_741, -0.4_539, -0.4_990, -0.3_720, -0.4_925]],
# fmt: on
] )
@require_torch_gpu
def UpperCAmelCase__ ( self : Optional[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple ):
'''simple docstring'''
lowercase_ = self.get_sd_vae_model()
lowercase_ = self.get_sd_image(__lowercase , shape=(3, 4, 64, 64) )
with torch.no_grad():
lowercase_ = model.decode(__lowercase ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
lowercase_ = sample[-1, -2:, :2, -2:].flatten().cpu()
lowercase_ = torch.tensor(__lowercase )
assert torch_all_close(__lowercase , __lowercase , atol=1e-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.0_369, 0.0_207, -0.0_776, -0.0_682, -0.1_747, -0.1_930, -0.1_465, -0.2_039]],
[16, [-0.1_628, -0.2_134, -0.2_747, -0.2_642, -0.3_774, -0.4_404, -0.3_687, -0.4_277]],
# fmt: on
] )
@require_torch_gpu
def UpperCAmelCase__ ( self : Optional[int] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any ):
'''simple docstring'''
lowercase_ = self.get_sd_vae_model(fpaa=__lowercase )
lowercase_ = self.get_sd_image(__lowercase , shape=(3, 4, 64, 64) , fpaa=__lowercase )
with torch.no_grad():
lowercase_ = model.decode(__lowercase ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
lowercase_ = sample[-1, -2:, :2, -2:].flatten().float().cpu()
lowercase_ = torch.tensor(__lowercase )
assert torch_all_close(__lowercase , __lowercase , atol=5e-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="""xformers is not required when using PyTorch 2.0.""" )
def UpperCAmelCase__ ( self : Tuple , UpperCamelCase__ : Union[str, Any] ):
'''simple docstring'''
lowercase_ = self.get_sd_vae_model(fpaa=__lowercase )
lowercase_ = self.get_sd_image(__lowercase , shape=(3, 4, 64, 64) , fpaa=__lowercase )
with torch.no_grad():
lowercase_ = model.decode(__lowercase ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
lowercase_ = model.decode(__lowercase ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(__lowercase , __lowercase , atol=1e-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="""xformers is not required when using PyTorch 2.0.""" )
def UpperCAmelCase__ ( self : int , UpperCamelCase__ : Any ):
'''simple docstring'''
lowercase_ = self.get_sd_vae_model()
lowercase_ = self.get_sd_image(__lowercase , shape=(3, 4, 64, 64) )
with torch.no_grad():
lowercase_ = model.decode(__lowercase ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
lowercase_ = model.decode(__lowercase ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(__lowercase , __lowercase , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.3_001, 0.0_918, -2.6_984, -3.9_720, -3.2_099, -5.0_353, 1.7_338, -0.2_065, 3.4_267]],
[47, [-1.5_030, -4.3_871, -6.0_355, -9.1_157, -1.6_661, -2.7_853, 2.1_607, -5.0_823, 2.5_633]],
# fmt: on
] )
def UpperCAmelCase__ ( self : Optional[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
lowercase_ = self.get_sd_vae_model()
lowercase_ = self.get_sd_image(__lowercase )
lowercase_ = self.get_generator(__lowercase )
with torch.no_grad():
lowercase_ = model.encode(__lowercase ).latent_dist
lowercase_ = dist.sample(generator=__lowercase )
assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
lowercase_ = sample[0, -1, -3:, -3:].flatten().cpu()
lowercase_ = torch.tensor(__lowercase )
lowercase_ = 3e-3 if torch_device != """mps""" else 1e-2
assert torch_all_close(__lowercase , __lowercase , atol=__lowercase )
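# Example of the fixture naming scheme produced by get_file_format above
# (illustrative): seed 33 with the default shape (4, 3, 512, 512) resolves to
# "gaussian_noise_s=33_shape=4_3_512_512.npy" among the hub-hosted test
# fixtures.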
| 715
|
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer , key , value , full_name , weight_type ):
    for attribute in key.split(""".""" ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
        F''' {value.shape} for {full_name}'''
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def recursively_load_weights_wavaveca(fairseq_model , hf_model ):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == """group""" , )
            is_used = True
        elif name.split(""".""" )[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split(""".""" )[-2]
                        mapped_key = mapped_key.replace("""*""" , layer_index )
                    if "weight_g" in name:
                        weight_type = """weight_g"""
                    elif "weight_v" in name:
                        weight_type = """weight_v"""
                    elif "bias" in name:
                        weight_type = """bias"""
                    elif "weight" in name:
                        weight_type = """weight"""
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(F'''Unused weights: {unused_weights}''' )
    return proj_weight
def load_conv_layer(full_name , value , feature_extractor , unused_weights , use_group_norm ):
    name = full_name.split("""conv_layers.""" )[-1]
    items = name.split(""".""" )
    layer_id = int(items[0] )
    type_id = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(UpperCAmelCase__ )
def make_linear_from_emb(emb ):
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
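# Note on the helper above: assigning emb.weight.data shares (rather than
# copies) the embedding matrix, so the resulting output projection stays tied
# to the decoder's input embeddings.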
def create_vocab_dict(dict_path ):
    with open(dict_path , """r""" , encoding="""utf-8""" ) as f:
        lines = f.readlines()
        words = [line.split(""" """ )[0] for line in lines]
    num_words = len(words )
    vocab_dict = {
        """<s>""": 0,
        """<pad>""": 1,
        """</s>""": 2,
        """<unk>""": 3,
    }
    vocab_dict.update(dict(zip(words , range(4 , num_words + 4 ) ) ) )
    return vocab_dict
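# Illustrative example (hypothetical dict file contents): a fairseq dict whose
# first two lines are "| 94802" and "E 51860" yields
#   {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "|": 4, "E": 5}
# i.e. fairseq symbols are appended after the four special tokens.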
@torch.no_grad()
def convert_wavaveca_checkpoint(checkpoint_path , pytorch_dump_folder_path , dict_path , encoder_config_path , decoder_config_path , vocab_size , num_decoder_layers , ):
lowercase_ = WavaVecaConfig.from_pretrained(UpperCAmelCase__ )
lowercase_ = SpeechaTextaConfig.from_pretrained(
UpperCAmelCase__ , vocab_size=UpperCAmelCase__ , decoder_layers=UpperCAmelCase__ , do_stable_layer_norm=UpperCAmelCase__ )
lowercase_ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , )
lowercase_ , lowercase_ , lowercase_ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
lowercase_ = model[0].eval()
# set weights for wav2vec2 encoder
lowercase_ = WavaVecaModel(UpperCAmelCase__ )
lowercase_ = recursively_load_weights_wavaveca(model.encoder , UpperCAmelCase__ )
lowercase_ = SpeechaTextaForCausalLM(UpperCAmelCase__ )
lowercase_ , lowercase_ = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=UpperCAmelCase__ )
# set output linear layer
unexpected_keys.remove("""embed_out""" )
lowercase_ = nn.Parameter(model.decoder.embed_out.detach() )
# layer norm is init to identity matrix so leaving it is fine
logger.warning(F'''The following keys are missing when loading the decoder weights: {missing_keys}''' )
logger.warning(F'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''' )
lowercase_ = SpeechEncoderDecoderModel(encoder=UpperCAmelCase__ , decoder=UpperCAmelCase__ )
lowercase_ = False
# add projection layer
lowercase_ = nn.Parameter(projection_layer.weight )
lowercase_ = nn.Parameter(projection_layer.bias )
lowercase_ = create_vocab_dict(UpperCAmelCase__ )
with open(os.path.join(UpperCAmelCase__ , """vocab.json""" ) , """w""" ) as fp:
json.dump(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase_ = SpeechaTextaTokenizer(os.path.join(UpperCAmelCase__ , """vocab.json""" ) )
tokenizer.save_pretrained(UpperCAmelCase__ )
lowercase_ = hf_wavavec.config.to_dict()
lowercase_ = tokenizer.pad_token_id
lowercase_ = tokenizer.bos_token_id
lowercase_ = tokenizer.eos_token_id
lowercase_ = """speech_to_text_2"""
lowercase_ = """wav2vec2"""
lowercase_ = SpeechEncoderDecoderConfig.from_dict(UpperCAmelCase__ )
hf_wavavec.save_pretrained(UpperCAmelCase__ )
feature_extractor.save_pretrained(UpperCAmelCase__ )
if __name__ == "__main__":
a = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument(
'--encoder_config_path',
default='facebook/wav2vec2-large-lv60',
type=str,
help='Path to hf encoder wav2vec2 checkpoint config',
)
parser.add_argument(
'--decoder_config_path',
default='facebook/s2t-small-mustc-en-fr-st',
type=str,
help='Path to hf decoder s2t checkpoint config',
)
parser.add_argument('--vocab_size', default=1_0_2_2_4, type=int, help='Vocab size of decoder')
parser.add_argument('--num_decoder_layers', default=7, type=int, help='Number of decoder layers')
a = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
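# Hypothetical invocation (the script file name and every path below are
# placeholders):
#
#   python convert_wav2vec2_seq2seq_checkpoint.py \
#       --checkpoint_path /path/to/checkpoint_best.pt \
#       --dict_path /path/to/dict.ltr.txt \
#       --pytorch_dump_folder_path ./s2t-wav2vec2-large-en-de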
| 650
| 0
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
a = logging.get_logger(__name__)
def create_rename_keys(config , base_model=False ):
    rename_keys = []
# fmt: off
# stem:
rename_keys.append(("""cls_token""", """vit.embeddings.cls_token""") )
rename_keys.append(("""pos_embed""", """vit.embeddings.position_embeddings""") )
rename_keys.append(("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias""") )
# backbone
rename_keys.append(("""patch_embed.backbone.stem.conv.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.bias""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias""") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias''') )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
# fmt: on
return rename_keys
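# Example of one produced mapping, taken from the stem handling above:
#   ("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight")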
def read_in_q_k_v(state_dict , config , base_model=False ):
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' )
        in_proj_bias = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]
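# Illustrative shape check for the split above: with hidden_size = 768 the
# fused qkv weight popped from timm has shape (2304, 768); the three
# consecutive 768-row blocks become the query, key and value projections.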
def remove_classification_head_(state_dict ):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key(dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name , pytorch_dump_folder_path , push_to_hub=False ):
lowercase_ = BitConfig(
global_padding="""same""" , layer_type="""bottleneck""" , depths=(3, 4, 9) , out_features=["""stage3"""] , embedding_dynamic_padding=_lowerCAmelCase , )
lowercase_ = ViTHybridConfig(backbone_config=_lowerCAmelCase , image_size=3_8_4 , num_labels=1_0_0_0 )
lowercase_ = False
# load original model from timm
lowercase_ = timm.create_model(_lowerCAmelCase , pretrained=_lowerCAmelCase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
lowercase_ = timm_model.state_dict()
if base_model:
remove_classification_head_(_lowerCAmelCase )
lowercase_ = create_rename_keys(_lowerCAmelCase , _lowerCAmelCase )
for src, dest in rename_keys:
rename_key(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
read_in_q_k_v(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
lowercase_ = "huggingface/label-files"
lowercase_ = "imagenet-1k-id2label.json"
lowercase_ = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="""dataset""" ) , """r""" ) )
lowercase_ = {int(_lowerCAmelCase ): v for k, v in idalabel.items()}
lowercase_ = idalabel
lowercase_ = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
lowercase_ = ViTHybridModel(_lowerCAmelCase ).eval()
else:
lowercase_ = ViTHybridForImageClassification(_lowerCAmelCase ).eval()
model.load_state_dict(_lowerCAmelCase )
# create image processor
lowercase_ = create_transform(**resolve_data_config({} , model=_lowerCAmelCase ) )
lowercase_ = transform.transforms
lowercase_ = {
"bilinear": PILImageResampling.BILINEAR,
"bicubic": PILImageResampling.BICUBIC,
"nearest": PILImageResampling.NEAREST,
}
lowercase_ = ViTHybridImageProcessor(
do_resize=_lowerCAmelCase , size={"""shortest_edge""": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=_lowerCAmelCase , crop_size={"""height""": timm_transforms[1].size[0], """width""": timm_transforms[1].size[1]} , do_normalize=_lowerCAmelCase , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
lowercase_ = prepare_img()
lowercase_ = transform(_lowerCAmelCase ).unsqueeze(0 )
lowercase_ = processor(_lowerCAmelCase , return_tensors="""pt""" ).pixel_values
# verify pixel values
assert torch.allclose(_lowerCAmelCase , _lowerCAmelCase )
# verify logits
with torch.no_grad():
lowercase_ = model(_lowerCAmelCase )
lowercase_ = outputs.logits
print("""Predicted class:""" , logits.argmax(-1 ).item() )
if base_model:
lowercase_ = timm_model.forward_features(_lowerCAmelCase )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(_lowerCAmelCase , outputs.pooler_output , atol=1e-3 )
else:
lowercase_ = timm_model(_lowerCAmelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(_lowerCAmelCase , outputs.logits , atol=1e-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
print(F'''Saving model {vit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowerCAmelCase )
print(F'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(_lowerCAmelCase )
if push_to_hub:
print(F'''Pushing model and processor to the hub {vit_name}''' )
model.push_to_hub(F'''ybelkada/{vit_name}''' )
processor.push_to_hub(F'''ybelkada/{vit_name}''' )
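# Example invocation (the script filename is an assumption; the flags match the
# argparse definitions below):
# python convert_vit_hybrid_timm_to_pytorch.py --vit_name vit_base_r50_s16_384 \
#     --pytorch_dump_folder_path ./vit-hybrid-base-bit-384 --push_to_hub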
if __name__ == "__main__":
a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_r50_s16_384',
type=str,
help='Name of the hybrid ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.'
)
a = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 716
|
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a = logging.get_logger(__name__)
# TODO Update this
a = {
'facebook/esm-1b': 'https://huggingface.co/facebook/esm-1b/resolve/main/config.json',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class UpperCamelCase__ ( __magic_name__ ):
__SCREAMING_SNAKE_CASE : Tuple = 'esm'
def __init__( self : Optional[Any] , UpperCamelCase__ : Dict=None , UpperCamelCase__ : Tuple=None , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : List[Any]=768 , UpperCamelCase__ : Tuple=12 , UpperCamelCase__ : Optional[int]=12 , UpperCamelCase__ : Dict=3_072 , UpperCamelCase__ : Union[str, Any]=0.1 , UpperCamelCase__ : Dict=0.1 , UpperCamelCase__ : Optional[int]=1_026 , UpperCamelCase__ : Any=0.02 , UpperCamelCase__ : Dict=1e-12 , UpperCamelCase__ : List[str]="absolute" , UpperCamelCase__ : Union[str, Any]=True , UpperCamelCase__ : Tuple=None , UpperCamelCase__ : Union[str, Any]=False , UpperCamelCase__ : Optional[int]=False , UpperCamelCase__ : Dict=None , UpperCamelCase__ : str=None , **UpperCamelCase__ : str , ):
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase__ , mask_token_id=UpperCamelCase__ , **UpperCamelCase__ )
lowercase_ = vocab_size
lowercase_ = hidden_size
lowercase_ = num_hidden_layers
lowercase_ = num_attention_heads
lowercase_ = intermediate_size
lowercase_ = hidden_dropout_prob
lowercase_ = attention_probs_dropout_prob
lowercase_ = max_position_embeddings
lowercase_ = initializer_range
lowercase_ = layer_norm_eps
lowercase_ = position_embedding_type
lowercase_ = use_cache
lowercase_ = emb_layer_norm_before
lowercase_ = token_dropout
lowercase_ = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info("""No esmfold_config supplied for folding model, using default values.""" )
lowercase_ = EsmFoldConfig()
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
lowercase_ = EsmFoldConfig(**UpperCamelCase__ )
lowercase_ = esmfold_config
if vocab_list is None:
logger.warning("""No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!""" )
lowercase_ = get_default_vocab_list()
else:
lowercase_ = vocab_list
else:
lowercase_ = None
lowercase_ = None
        if self.esmfold_config is not None and getattr(self.esmfold_config , """use_esm_attn_map""" , False ):
raise ValueError("""The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!""" )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
lowercase_ = super().to_dict()
if isinstance(self.esmfold_config , UpperCamelCase__ ):
lowercase_ = self.esmfold_config.to_dict()
return output
@dataclass
class UpperCamelCase__ :
__SCREAMING_SNAKE_CASE : str = None
__SCREAMING_SNAKE_CASE : bool = True
__SCREAMING_SNAKE_CASE : bool = False
__SCREAMING_SNAKE_CASE : bool = False
__SCREAMING_SNAKE_CASE : bool = False
__SCREAMING_SNAKE_CASE : float = 0
__SCREAMING_SNAKE_CASE : bool = True
__SCREAMING_SNAKE_CASE : bool = False
__SCREAMING_SNAKE_CASE : int = 128
__SCREAMING_SNAKE_CASE : "TrunkConfig" = None
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
if self.trunk is None:
lowercase_ = TrunkConfig()
elif isinstance(self.trunk , UpperCamelCase__ ):
lowercase_ = TrunkConfig(**self.trunk )
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ = asdict(self )
lowercase_ = self.trunk.to_dict()
return output
@dataclass
class UpperCamelCase__ :
__SCREAMING_SNAKE_CASE : int = 48
__SCREAMING_SNAKE_CASE : int = 1024
__SCREAMING_SNAKE_CASE : int = 128
__SCREAMING_SNAKE_CASE : int = 32
__SCREAMING_SNAKE_CASE : int = 32
__SCREAMING_SNAKE_CASE : int = 32
__SCREAMING_SNAKE_CASE : float = 0
__SCREAMING_SNAKE_CASE : float = 0
__SCREAMING_SNAKE_CASE : bool = False
__SCREAMING_SNAKE_CASE : int = 4
__SCREAMING_SNAKE_CASE : Optional[int] = 128
__SCREAMING_SNAKE_CASE : "StructureModuleConfig" = None
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
if self.structure_module is None:
lowercase_ = StructureModuleConfig()
elif isinstance(self.structure_module , UpperCamelCase__ ):
lowercase_ = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(F'''`max_recycles` should be positive, got {self.max_recycles}.''' )
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                """`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"""
                F''' {self.sequence_state_dim} and {self.sequence_head_width}.''' )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                """`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"""
                F''' {self.pairwise_state_dim} and {self.pairwise_head_width}.''' )
lowercase_ = self.sequence_state_dim // self.sequence_head_width
lowercase_ = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
"""`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"""
F''' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.''' )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
"""`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"""
F''' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.''' )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(F'''`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.''' )
if self.dropout >= 0.4:
raise ValueError(F'''`dropout` should not be greater than 0.4, got {self.dropout}.''' )
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
lowercase_ = asdict(self )
lowercase_ = self.structure_module.to_dict()
return output
@dataclass
class UpperCamelCase__ :
__SCREAMING_SNAKE_CASE : int = 384
__SCREAMING_SNAKE_CASE : int = 128
__SCREAMING_SNAKE_CASE : int = 16
__SCREAMING_SNAKE_CASE : int = 128
__SCREAMING_SNAKE_CASE : int = 12
__SCREAMING_SNAKE_CASE : int = 4
__SCREAMING_SNAKE_CASE : int = 8
__SCREAMING_SNAKE_CASE : float = 0.1
__SCREAMING_SNAKE_CASE : int = 8
__SCREAMING_SNAKE_CASE : int = 1
__SCREAMING_SNAKE_CASE : int = 2
__SCREAMING_SNAKE_CASE : int = 7
__SCREAMING_SNAKE_CASE : int = 10
__SCREAMING_SNAKE_CASE : float = 1e-8
__SCREAMING_SNAKE_CASE : float = 1e5
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
return asdict(self )
def get_default_vocab_list():
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
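# Minimal usage sketch (hyperparameter values are illustrative only; the first
# UpperCamelCase__ class above plays the role of the upstream EsmConfig):
# config = UpperCamelCase__(vocab_size=33, hidden_size=320, num_hidden_layers=6,
#                           num_attention_heads=20, intermediate_size=1280)
# assert config.to_dict()["hidden_size"] == 320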
| 650
| 0
|
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
CI_HUB_USER = '''__DUMMY_TRANSFORMERS_USER__'''
CI_HUB_USER_FULL_NAME = '''Dummy User'''
CI_HUB_USER_TOKEN = '''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'''
CI_HUB_ENDPOINT = '''https://hub-ci.huggingface.co'''
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + '''/datasets/{repo_id}/resolve/{revision}/{path}'''
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + '''/{repo_id}/resolve/{revision}/{filename}'''
CI_HUB_TOKEN_PATH = Path('~/.huggingface/hub_ci_token').expanduser()
@pytest.fixture
def UpperCAmelCase_ ( monkeypatch ):
    monkeypatch.setattr(
        """huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE""" , CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE )
@pytest.fixture
def UpperCAmelCase_ ( monkeypatch ):
    monkeypatch.setattr("""datasets.config.HF_ENDPOINT""" , CI_HUB_ENDPOINT )
    monkeypatch.setattr("""datasets.config.HUB_DATASETS_URL""" , CI_HUB_DATASETS_URL )
@pytest.fixture
def UpperCAmelCase_ ( monkeypatch ):
    monkeypatch.setattr("""huggingface_hub.hf_api.HfFolder.path_token""" , CI_HUB_TOKEN_PATH )
@pytest.fixture
def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ ):
    HfFolder.save_token(CI_HUB_USER_TOKEN )
    yield
    HfFolder.delete_token()
@pytest.fixture(scope="""session""" )
def UpperCAmelCase_ ( ):
    return HfApi(endpoint=CI_HUB_ENDPOINT )
@pytest.fixture(scope="""session""" )
def UpperCAmelCase_ ( UpperCAmelCase__ ):
    previous_token = HfFolder.get_token()
    HfFolder.save_token(CI_HUB_USER_TOKEN )
    yield CI_HUB_USER_TOKEN
    if previous_token is not None:
        HfFolder.save_token(previous_token )
@pytest.fixture
def cleanup_repo( UpperCAmelCase__ ):
def _cleanup_repo(UpperCAmelCase__ ):
hf_api.delete_repo(UpperCAmelCase__ , token=UpperCAmelCase__ , repo_type="""dataset""" )
return _cleanup_repo
@pytest.fixture
def UpperCAmelCase_ ( cleanup_repo ):
@contextmanager
def _temporary_repo(UpperCAmelCase__ ):
try:
yield repo_id
finally:
cleanup_repo(UpperCAmelCase__ )
return _temporary_repo
@pytest.fixture(scope="""session""" )
def hf_private_dataset_repo_txt_data_( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
lowercase_ = F'''repo_txt_data-{int(time.time() * 10e3 )}'''
lowercase_ = F'''{CI_HUB_USER}/{repo_name}'''
hf_api.create_repo(UpperCAmelCase__ , token=UpperCAmelCase__ , repo_type="""dataset""" , private=UpperCAmelCase__ )
hf_api.upload_file(
token=UpperCAmelCase__ , path_or_fileobj=str(UpperCAmelCase__ ) , path_in_repo="""data/text_data.txt""" , repo_id=UpperCAmelCase__ , repo_type="""dataset""" , )
yield repo_id
try:
hf_api.delete_repo(UpperCAmelCase__ , token=UpperCAmelCase__ , repo_type="""dataset""" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def hf_private_dataset_repo_txt_data( hf_private_dataset_repo_txt_data_ , UpperCAmelCase__ , UpperCAmelCase__ ):
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="""session""" )
def hf_private_dataset_repo_zipped_txt_data_( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
lowercase_ = F'''repo_zipped_txt_data-{int(time.time() * 10e3 )}'''
lowercase_ = F'''{CI_HUB_USER}/{repo_name}'''
hf_api.create_repo(UpperCAmelCase__ , token=UpperCAmelCase__ , repo_type="""dataset""" , private=UpperCAmelCase__ )
hf_api.upload_file(
token=UpperCAmelCase__ , path_or_fileobj=str(UpperCAmelCase__ ) , path_in_repo="""data.zip""" , repo_id=UpperCAmelCase__ , repo_type="""dataset""" , )
yield repo_id
try:
hf_api.delete_repo(UpperCAmelCase__ , token=UpperCAmelCase__ , repo_type="""dataset""" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def hf_private_dataset_repo_zipped_txt_data( hf_private_dataset_repo_zipped_txt_data_ , UpperCAmelCase__ , UpperCAmelCase__ ):
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope="""session""" )
def hf_private_dataset_repo_zipped_img_data_( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
lowercase_ = F'''repo_zipped_img_data-{int(time.time() * 10e3 )}'''
lowercase_ = F'''{CI_HUB_USER}/{repo_name}'''
hf_api.create_repo(UpperCAmelCase__ , token=UpperCAmelCase__ , repo_type="""dataset""" , private=UpperCAmelCase__ )
hf_api.upload_file(
token=UpperCAmelCase__ , path_or_fileobj=str(UpperCAmelCase__ ) , path_in_repo="""data.zip""" , repo_id=UpperCAmelCase__ , repo_type="""dataset""" , )
yield repo_id
try:
hf_api.delete_repo(UpperCAmelCase__ , token=UpperCAmelCase__ , repo_type="""dataset""" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def hf_private_dataset_repo_zipped_img_data( hf_private_dataset_repo_zipped_img_data_ , UpperCAmelCase__ , UpperCAmelCase__ ):
return hf_private_dataset_repo_zipped_img_data_
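# Usage sketch (fixture names follow the upstream `datasets` test suite; anything not
# defined above is assumed): a test simply requests a repo fixture, and pytest wires up
# repo creation before the test and cleanup after it.
# def test_download_private_text(hf_private_dataset_repo_txt_data, hf_token):
#     assert hf_private_dataset_repo_txt_data.startswith(f"{CI_HUB_USER}/repo_txt_data-")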
| 717
|
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser(subparsers=None ):
    if subparsers is not None:
        parser = subparsers.add_parser("""env""" )
    else:
        parser = argparse.ArgumentParser("""Accelerate env command""" )
    parser.add_argument(
        """--config_file""" , default=None , help="""The config file to use for the default values in the launching script.""" )
    if subparsers is not None:
        parser.set_defaults(func=env_command )
    return parser
def env_command(args ):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()
    accelerate_config = """Not found"""
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file ):
        accelerate_config = load_config_from_file(args.config_file ).to_dict()
    info = {
        """`Accelerate` version""": version,
        """Platform""": platform.platform(),
        """Python version""": platform.python_version(),
        """Numpy version""": np.__version__,
        """PyTorch version (GPU?)""": F'''{pt_version} ({pt_cuda_available})''',
        """PyTorch XPU available""": str(pt_xpu_available ),
        """PyTorch NPU available""": str(pt_npu_available ),
        """System RAM""": F'''{psutil.virtual_memory().total / 1_0_2_4 ** 3:.2f} GB''',
    }
    if pt_cuda_available:
        info["""GPU type"""] = torch.cuda.get_device_name()
    print("""\nCopy-and-paste the text below in your GitHub issue\n""" )
    print("""\n""".join([F'''- {prop}: {val}''' for prop, val in info.items()] ) )
    print("""- `Accelerate` default config:""" if args.config_file is None else """- `Accelerate` config passed:""" )
    accelerate_config_str = (
        """\n""".join([F'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] )
        if isinstance(accelerate_config , dict )
        else F'''\t{accelerate_config}'''
    )
    print(accelerate_config_str )
    info["""`Accelerate` configs"""] = accelerate_config
    return info
def main():
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args )
    return 0
if __name__ == "__main__":
raise SystemExit(main())
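# Typical invocation once registered with the CLI (`accelerate env` is the real
# subcommand; the config path shown is just the usual default location):
# accelerate env --config_file ~/.cache/huggingface/accelerate/default_config.yaml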
| 650
| 0
|
def UpperCAmelCase_ ( discount_rate , cash_flows ):
    if discount_rate < 0:
        raise ValueError("""Discount rate cannot be negative""" )
    if not cash_flows:
        raise ValueError("""Cash flows list cannot be empty""" )
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows ) )
    return round(present_value , ndigits=2 )
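# Worked example (the obfuscated name UpperCAmelCase_ stands in for a present-value
# helper): at a 10% discount rate, cash flows [-100, 50, 60] give
# NPV = -100 + 50 / 1.1 + 60 / 1.1 ** 2 ≈ -4.96.
# >>> UpperCAmelCase_(0.1, [-100, 50, 60])
# -4.96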
if __name__ == "__main__":
import doctest
doctest.testmod()
| 718
|
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class UpperCamelCase__ :
def __init__( self : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple=13 , UpperCamelCase__ : Tuple=30 , UpperCamelCase__ : List[Any]=2 , UpperCamelCase__ : Union[str, Any]=3 , UpperCamelCase__ : Union[str, Any]=True , UpperCamelCase__ : Tuple=True , UpperCamelCase__ : int=32 , UpperCamelCase__ : Tuple=5 , UpperCamelCase__ : List[Any]=4 , UpperCamelCase__ : Dict=37 , UpperCamelCase__ : Tuple="gelu" , UpperCamelCase__ : Union[str, Any]=0.1 , UpperCamelCase__ : Optional[int]=0.1 , UpperCamelCase__ : Any=10 , UpperCamelCase__ : Optional[int]=0.02 , UpperCamelCase__ : Dict=3 , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : Union[str, Any]=2 , ):
'''simple docstring'''
lowercase_ = parent
lowercase_ = batch_size
lowercase_ = image_size
lowercase_ = patch_size
lowercase_ = num_channels
lowercase_ = is_training
lowercase_ = use_labels
lowercase_ = hidden_size
lowercase_ = num_hidden_layers
lowercase_ = num_attention_heads
lowercase_ = intermediate_size
lowercase_ = hidden_act
lowercase_ = hidden_dropout_prob
lowercase_ = attention_probs_dropout_prob
lowercase_ = type_sequence_label_size
lowercase_ = initializer_range
lowercase_ = scope
lowercase_ = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
lowercase_ = (image_size // patch_size) ** 2
lowercase_ = num_patches + 2
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase_ = None
if self.use_labels:
lowercase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase_ = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def UpperCAmelCase__ ( self : Tuple , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple ):
'''simple docstring'''
lowercase_ = DeiTModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowercase_ = model(UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase__ ( self : Optional[int] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : str ):
'''simple docstring'''
lowercase_ = DeiTForMaskedImageModeling(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowercase_ = model(UpperCamelCase__ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowercase_ = 1
lowercase_ = DeiTForMaskedImageModeling(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowercase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase_ = model(UpperCamelCase__ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def UpperCAmelCase__ ( self : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] ):
'''simple docstring'''
lowercase_ = self.type_sequence_label_size
lowercase_ = DeiTForImageClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowercase_ = model(UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowercase_ = 1
lowercase_ = DeiTForImageClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowercase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase_ = model(UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
lowercase_ = self.prepare_config_and_inputs()
        lowercase_ , lowercase_ , lowercase_ = config_and_inputs
lowercase_ = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Any = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
__SCREAMING_SNAKE_CASE : str = (
{
'feature-extraction': DeiTModel,
'image-classification': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
__SCREAMING_SNAKE_CASE : Dict = False
__SCREAMING_SNAKE_CASE : List[Any] = False
__SCREAMING_SNAKE_CASE : List[Any] = False
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ = DeiTModelTester(self )
lowercase_ = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""DeiT does not use inputs_embeds""" )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
pass
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ = model_class(UpperCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) )
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ = model_class(UpperCamelCase__ )
lowercase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ = [*signature.parameters.keys()]
lowercase_ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCamelCase__ )
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )
def UpperCAmelCase__ ( self : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any]=False ):
'''simple docstring'''
lowercase_ = super()._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
if not self.model_tester.is_training:
return
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(UpperCamelCase__ )
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
lowercase_ = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.train()
lowercase_ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
lowercase_ = model(**UpperCamelCase__ ).loss
loss.backward()
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
lowercase_ = False
lowercase_ = True
for model_class in self.all_model_classes:
if model_class in get_values(UpperCamelCase__ ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
lowercase_ = model_class(UpperCamelCase__ )
model.gradient_checkpointing_enable()
model.to(UpperCamelCase__ )
model.train()
lowercase_ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
lowercase_ = model(**UpperCamelCase__ ).loss
loss.backward()
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ = [
{"""title""": """multi_label_classification""", """num_labels""": 2, """dtype""": torch.float},
{"""title""": """single_label_classification""", """num_labels""": 1, """dtype""": torch.long},
{"""title""": """regression""", """num_labels""": 1, """dtype""": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(UpperCamelCase__ ),
*get_values(UpperCamelCase__ ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F'''Testing {model_class} with {problem_type["title"]}''' ):
lowercase_ = problem_type["""title"""]
lowercase_ = problem_type["""num_labels"""]
lowercase_ = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.train()
lowercase_ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
if problem_type["num_labels"] > 1:
lowercase_ = inputs["""labels"""].unsqueeze(1 ).repeat(1 , problem_type["""num_labels"""] )
lowercase_ = inputs["""labels"""].to(problem_type["""dtype"""] )
                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom that something is wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=UpperCamelCase__ ) as warning_list:
lowercase_ = model(**UpperCamelCase__ ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F'''Something is going wrong in the regression problem: intercepted {w.message}''' )
loss.backward()
@slow
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ = DeiTModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def UpperCAmelCase_ ( ):
lowercase_ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class UpperCamelCase__ ( unittest.TestCase ):
@cached_property
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
return (
DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
lowercase_ = DeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" ).to(
UpperCamelCase__ )
lowercase_ = self.default_image_processor
lowercase_ = prepare_img()
lowercase_ = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
lowercase_ = model(**UpperCamelCase__ )
# verify the logits
lowercase_ = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
lowercase_ = torch.tensor([-1.0_266, 0.1_912, -1.2_861] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase_ = DeiTModel.from_pretrained(
"""facebook/deit-base-distilled-patch16-224""" , torch_dtype=torch.floataa , device_map="""auto""" )
lowercase_ = self.default_image_processor
lowercase_ = prepare_img()
lowercase_ = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" )
lowercase_ = inputs.pixel_values.to(UpperCamelCase__ )
# forward pass to make sure inference works in fp16
with torch.no_grad():
lowercase_ = model(UpperCamelCase__ )
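# The half-precision test above boils down to this loading pattern (a sketch; the model
# id is the one used in the test):
# model = DeiTModel.from_pretrained(
#     "facebook/deit-base-distilled-patch16-224", torch_dtype=torch.float16, device_map="auto"
# )
# A forward pass in fp16 should then complete without dtype errors on a CUDA device.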
| 650
| 0
|
def UpperCAmelCase_ ( grid ):
    if not grid or not grid[0]:
        raise TypeError("""The grid does not contain the appropriate information""" )
    for cell_n in range(1 , len(grid[0] ) ):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]
    for row_n in range(1 , len(grid ) ):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row , row_above )
        row_above = grid[row_n]
    return grid[-1][-1]
def fill_row(current_row , row_above ):
    current_row[0] += row_above[0]
    for cell_n in range(1 , len(current_row ) ):
        current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] )
    return current_row
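# Worked example: for [[1, 3, 1], [1, 5, 1], [4, 2, 1]] the cheapest top-left to
# bottom-right path is 1 -> 3 -> 1 -> 1 -> 1, so the function returns 7 (note that the
# grid is updated in place with the running path sums).
# >>> UpperCAmelCase_([[1, 3, 1], [1, 5, 1], [4, 2, 1]])
# 7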
if __name__ == "__main__":
import doctest
doctest.testmod()
| 719
|
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 650
| 0
|
def miller_rabin(n , allow_probable = False ):
    if n == 2:
        return True
    if not n % 2 or n < 2:
        return False
    if n > 5 and n % 1_0 not in (1, 3, 7, 9):  # can quickly check last digit
        return False
    if n > 3_3_1_7_0_4_4_0_6_4_6_7_9_8_8_7_3_8_5_9_6_1_9_8_1 and not allow_probable:
        raise ValueError(
            """Warning: upper bound of deterministic test is exceeded. """
            """Pass allow_probable=True to allow probabilistic test. """
            """A return value of True indicates a probable prime.""" )
    # array bounds provided by analysis
    bounds = [
        2_0_4_7,
        1_3_7_3_6_5_3,
        2_5_3_2_6_0_0_1,
        3_2_1_5_0_3_1_7_5_1,
        2_1_5_2_3_0_2_8_9_8_7_4_7,
        3_4_7_4_7_4_9_6_6_0_3_8_3,
        3_4_1_5_5_0_0_7_1_7_2_8_3_2_1,
        1,
        3_8_2_5_1_2_3_0_5_6_5_4_6_4_1_3_0_5_1,
        1,
        1,
        3_1_8_6_6_5_8_5_7_8_3_4_0_3_1_1_5_1_1_6_7_4_6_1,
        3_3_1_7_0_4_4_0_6_4_6_7_9_8_8_7_3_8_5_9_6_1_9_8_1,
    ]
    primes = [2, 3, 5, 7, 1_1, 1_3, 1_7, 1_9, 2_3, 2_9, 3_1, 3_7, 4_1]
    for idx, _p in enumerate(bounds , 1 ):
        if n < _p:
            # then we have our last prime to check
            plist = primes[:idx]
            break
    d , s = n - 1, 0
    # break up n - 1 into a power of 2 (s) and
    # remaining odd component
    # essentially, solve for d * 2 ** s == n - 1
    while d % 2 == 0:
        d //= 2
        s += 1
    for prime in plist:
        pr = False
        for r in range(s ):
            m = pow(prime , d * 2**r , n )
            # see article for analysis explanation for m
            if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
                # this loop will not determine compositeness
                break
        if pr:
            continue
        # if pr is False, then the above loop never evaluated to true,
        # and n MUST be composite
        return False
    return True
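# Quick examples: 97 is prime, 91 = 7 * 13 is composite, and both sit far below the
# deterministic bound, so no probabilistic fallback is needed.
# >>> miller_rabin(97)
# True
# >>> miller_rabin(91)
# False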
def test_miller_rabin():
assert not miller_rabin(5_6_1 )
assert miller_rabin(5_6_3 )
# 2047
assert not miller_rabin(8_3_8_2_0_1 )
assert miller_rabin(8_3_8_2_0_7 )
# 1_373_653
assert not miller_rabin(1_7_3_1_6_0_0_1 )
assert miller_rabin(1_7_3_1_6_0_1_7 )
# 25_326_001
assert not miller_rabin(3_0_7_8_3_8_6_6_4_1 )
assert miller_rabin(3_0_7_8_3_8_6_6_5_3 )
# 3_215_031_751
assert not miller_rabin(1_7_1_3_0_4_5_5_7_4_8_0_1 )
assert miller_rabin(1_7_1_3_0_4_5_5_7_4_8_1_9 )
# 2_152_302_898_747
assert not miller_rabin(2_7_7_9_7_9_9_7_2_8_3_0_7 )
assert miller_rabin(2_7_7_9_7_9_9_7_2_8_3_2_7 )
# 3_474_749_660_383
assert not miller_rabin(1_1_3_8_5_0_0_2_3_9_0_9_4_4_1 )
assert miller_rabin(1_1_3_8_5_0_0_2_3_9_0_9_5_2_7 )
# 341_550_071_728_321
assert not miller_rabin(1_2_7_5_0_4_1_0_1_8_8_4_8_8_0_4_3_5_1 )
assert miller_rabin(1_2_7_5_0_4_1_0_1_8_8_4_8_8_0_4_3_9_1 )
# 3_825_123_056_546_413_051
assert not miller_rabin(7_9_6_6_6_4_6_4_4_5_8_5_0_7_7_8_7_7_9_1_8_6_7 )
assert miller_rabin(7_9_6_6_6_4_6_4_4_5_8_5_0_7_7_8_7_7_9_1_9_5_1 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(5_5_2_8_4_0_6_7_7_4_4_6_6_4_7_8_9_7_6_6_0_3_3_3 )
assert miller_rabin(5_5_2_8_4_0_6_7_7_4_4_6_6_4_7_8_9_7_6_6_0_3_5_9 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
| 720
|
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
a = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class UpperCamelCase__ ( __magic_name__ , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[Any] = XGLMTokenizer
__SCREAMING_SNAKE_CASE : List[Any] = XGLMTokenizerFast
__SCREAMING_SNAKE_CASE : List[Any] = True
__SCREAMING_SNAKE_CASE : int = True
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
lowercase_ = XGLMTokenizer(UpperCamelCase__ , keep_accents=UpperCamelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
lowercase_ = """<pad>"""
lowercase_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase__ ) , UpperCamelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase__ ) , UpperCamelCase__ )
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
lowercase_ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(len(UpperCamelCase__ ) , 1_008 )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1_008 )
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
lowercase_ = XGLMTokenizer(UpperCamelCase__ , keep_accents=UpperCamelCase__ )
lowercase_ = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(UpperCamelCase__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
lowercase_ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
UpperCamelCase__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
lowercase_ = tokenizer.convert_tokens_to_ids(UpperCamelCase__ )
self.assertListEqual(
UpperCamelCase__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
lowercase_ = tokenizer.convert_ids_to_tokens(UpperCamelCase__ )
self.assertListEqual(
UpperCamelCase__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
return XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(a , f.name )
            tokenizer = XGLMTokenizer(f.name , keep_accents=True )
            tokenizer_bin = pickle.dumps(tokenizer )
            pickle.loads(tokenizer_bin )
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
lowercase_ = self.get_tokenizer()
lowercase_ = self.get_rust_tokenizer()
lowercase_ = """I was born in 92000, and this is falsé."""
lowercase_ = tokenizer.tokenize(UpperCamelCase__ )
lowercase_ = rust_tokenizer.tokenize(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
lowercase_ = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
lowercase_ = rust_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
lowercase_ = self.get_rust_tokenizer()
lowercase_ = tokenizer.encode(UpperCamelCase__ )
lowercase_ = rust_tokenizer.encode(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
@slow
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ = """Hello World!"""
lowercase_ = [2, 31_227, 4_447, 35]
self.assertListEqual(UpperCamelCase__ , self.big_tokenizer.encode(UpperCamelCase__ ) )
@slow
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
lowercase_ = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"""
)
# fmt: off
lowercase_ = [2, 1_018, 67, 11, 1_988, 2_617, 5_631, 278, 11, 3_407, 48, 71_630, 28_085, 4, 3_234, 157, 13, 6, 5, 6, 4, 3_526, 768, 15, 659, 57, 298, 3_983, 864, 129, 21, 6, 5, 13_675, 377, 652, 7_580, 10_341, 155, 2_817, 422, 1_666, 7, 1_674, 53, 113, 202_277, 17_892, 33, 60, 87, 4, 3_234, 157, 61, 2_667, 52_376, 19, 88, 23, 735]
# fmt: on
self.assertListEqual(UpperCamelCase__ , self.big_tokenizer.encode(UpperCamelCase__ ) )
@slow
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
        # fmt: off
        lowercase_ = {
"""input_ids""": [[2, 108_825, 1_163, 15, 88_010, 473, 15_898, 157, 13_672, 1_857, 312, 8, 238_021, 1_163, 53, 13_672, 1_857, 312, 8, 53_283, 182_396, 8, 18_566, 16, 36_733, 4_101, 8, 230, 244_017, 122_553, 7, 15, 132_597, 4, 293, 12_511, 7_610, 4, 3_414, 132_597, 9, 4, 32_361, 362, 4, 734, 28_512, 32_569, 18, 4, 32_361, 26_096, 14_982, 73, 18_715, 21_433, 235_261, 15, 492, 12_427, 16, 53, 18_715, 21_433, 65_454, 15, 23_659, 563, 16, 278, 597, 2_843, 595, 7_931, 182_396, 64_186, 22, 886, 595, 132_981, 53, 25_540, 3_449, 43_982, 39_901, 5_951, 878, 330, 4, 27_694, 80_269, 312, 53, 6_517, 11_780, 611, 20_408, 5], [2, 6, 132_597, 67, 42_897, 33, 592, 8, 163_729, 25_540, 361, 136_997, 109_514, 173_230, 7, 501, 60, 102_913, 196, 5_631, 235, 63_243, 473, 6, 231_757, 74, 5_277, 7_905, 53, 3_095, 37_317, 22, 454, 183_874, 5], [2, 268, 31_298, 46_530, 6, 132_935, 43_831, 7, 597, 32, 24, 3_688, 9_865, 5]],
"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase__ , model_name="""facebook/xglm-564M""" , padding=UpperCamelCase__ , )
| 650
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
a = {"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swin"] = [
        "SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SwinForImageClassification",
        "SwinForMaskedImageModeling",
        "SwinModel",
        "SwinPreTrainedModel",
        "SwinBackbone",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_swin"] = [
        "TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSwinForImageClassification",
        "TFSwinForMaskedImageModeling",
        "TFSwinModel",
        "TFSwinPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
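# Behaviour sketch: with _LazyModule in place, `from transformers.models.swin import
# SwinModel` resolves the name through _import_structure on first access, so
# modeling_swin (and its torch dependency) is only imported when actually needed.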
| 721
|
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
a = None
a = logging.get_logger(__name__)
a = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
a = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
},
'tokenizer_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/tokenizer.json',
't5-base': 'https://huggingface.co/t5-base/resolve/main/tokenizer.json',
't5-large': 'https://huggingface.co/t5-large/resolve/main/tokenizer.json',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/tokenizer.json',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/tokenizer.json',
},
}
# TODO(PVP) - this should be removed in Transformers v5
a = {
't5-small': 5_1_2,
't5-base': 5_1_2,
't5-large': 5_1_2,
't5-3b': 5_1_2,
't5-11b': 5_1_2,
}
class UpperCamelCase__ ( __magic_name__ ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE : int = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE : str = ['input_ids', 'attention_mask']
__SCREAMING_SNAKE_CASE : Dict = TaTokenizer
__SCREAMING_SNAKE_CASE : List[int] = []
def __init__( self : int , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : Any=None , UpperCamelCase__ : Dict="</s>" , UpperCamelCase__ : Tuple="<unk>" , UpperCamelCase__ : Optional[Any]="<pad>" , UpperCamelCase__ : Union[str, Any]=100 , UpperCamelCase__ : Optional[Any]=None , **UpperCamelCase__ : List[str] , ):
'''simple docstring'''
if extra_ids > 0 and additional_special_tokens is None:
lowercase_ = [F'''<extra_id_{i}>''' for i in range(UpperCamelCase__ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
lowercase_ = len(set(filter(lambda UpperCamelCase__ : bool("""extra_id_""" in str(UpperCamelCase__ ) ) , UpperCamelCase__ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
""" provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"""
""" tokens""" )
super().__init__(
UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , extra_ids=UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , **UpperCamelCase__ , )
lowercase_ = vocab_file
lowercase_ = False if not self.vocab_file else True
lowercase_ = extra_ids
@staticmethod
def UpperCAmelCase__ ( UpperCamelCase__ : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : int ):
'''simple docstring'''
if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
lowercase_ = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
"""This tokenizer was incorrectly instantiated with a model max length of"""
F''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
""" behavior is kept to avoid breaking backwards compatibility when padding/encoding with"""
""" `truncation is True`.\n- Be aware that you SHOULD NOT rely on"""
F''' {pretrained_model_name_or_path} automatically truncating your input to'''
F''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
F''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
""" `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"""
""" instantiate this tokenizer with `model_max_length` set to your preferred value.""" , UpperCamelCase__ , )
return max_model_length
def UpperCAmelCase__ ( self : int , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(UpperCamelCase__ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowercase_ = os.path.join(
UpperCamelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ):
copyfile(self.vocab_file , UpperCamelCase__ )
logger.info(F'''Copy vocab file to {out_vocab_file}''' )
return (out_vocab_file,)
    def UpperCAmelCase__ ( self : int , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ):
        '''simple docstring'''
        token_ids_a = token_ids_a + [self.eos_token_id]
        if token_ids_b is None:
            return self.prefix_tokens + token_ids_a
        else:
            token_ids_b = token_ids_b + [self.eos_token_id]
            return self.prefix_tokens + token_ids_a + token_ids_b
    def UpperCAmelCase__ ( self : str , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ):
        '''simple docstring'''
        eos = [self.eos_token_id]
        if token_ids_b is None:
            return len(token_ids_a + eos ) * [0]
        return len(token_ids_a + eos + token_ids_b + eos ) * [0]
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
return list(
set(filter(lambda UpperCamelCase__ : bool(re.search(R"""<extra_id_\d+>""" , UpperCamelCase__ ) ) is not None , self.additional_special_tokens ) ) )
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
return [self.convert_tokens_to_ids(UpperCamelCase__ ) for token in self.get_sentinel_tokens()]
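# Sentinel sketch (ids assume the stock t5-small vocabulary of 32,100 entries;
# T5TokenizerFast is the upstream name for the class above):
# tok = T5TokenizerFast.from_pretrained("t5-small")
# tok.convert_tokens_to_ids("<extra_id_0>")  # -> 32099, the top of the vocab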
| 650
| 0
|
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class UpperCamelCase__ :
__SCREAMING_SNAKE_CASE : List[str] = None
__SCREAMING_SNAKE_CASE : List[str] = False
__SCREAMING_SNAKE_CASE : List[Any] = False
__SCREAMING_SNAKE_CASE : int = False
__SCREAMING_SNAKE_CASE : int = None
__SCREAMING_SNAKE_CASE : Optional[Any] = None
__SCREAMING_SNAKE_CASE : Any = False
__SCREAMING_SNAKE_CASE : str = False
__SCREAMING_SNAKE_CASE : Optional[int] = False
__SCREAMING_SNAKE_CASE : List[str] = True
__SCREAMING_SNAKE_CASE : int = None
__SCREAMING_SNAKE_CASE : Dict = 1
__SCREAMING_SNAKE_CASE : Dict = None
__SCREAMING_SNAKE_CASE : Optional[Any] = False
__SCREAMING_SNAKE_CASE : Optional[int] = None
__SCREAMING_SNAKE_CASE : List[str] = None
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
        return self.__class__(**{k: copy.deepcopy(v ) for k, v in self.__dict__.items()} )
| 700
|
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionDiffEditPipeline
__SCREAMING_SNAKE_CASE : Optional[Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'height', 'width', 'image'} | {'image_latents'}
__SCREAMING_SNAKE_CASE : List[str] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'image'} | {'image_latents'}
__SCREAMING_SNAKE_CASE : int = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__SCREAMING_SNAKE_CASE : Any = frozenset([] )
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=UpperCamelCase__ , )
lowercase_ = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=UpperCamelCase__ , set_alpha_to_one=UpperCamelCase__ , )
lowercase_ = DDIMInverseScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=UpperCamelCase__ , set_alpha_to_zero=UpperCamelCase__ , )
torch.manual_seed(0 )
lowercase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowercase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="""gelu""" , projection_dim=512 , )
lowercase_ = CLIPTextModel(UpperCamelCase__ )
lowercase_ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowercase_ = {
"""unet""": unet,
"""scheduler""": scheduler,
"""inverse_scheduler""": inverse_scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def UpperCAmelCase__ ( self : int , UpperCamelCase__ : Any , UpperCamelCase__ : Any=0 ):
'''simple docstring'''
lowercase_ = floats_tensor((1, 16, 16) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
lowercase_ = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
if str(UpperCamelCase__ ).startswith("""mps""" ):
lowercase_ = torch.manual_seed(UpperCamelCase__ )
else:
lowercase_ = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
lowercase_ = {
"""prompt""": """a dog and a newt""",
"""mask_image""": mask,
"""image_latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 2,
"""inpaint_strength""": 1.0,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
    def get_dummy_mask_inputs( self , device , seed=0 ):
        '''simple docstring'''
        image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        image = Image.fromarray(np.uint8(image ) ).convert("""RGB""" )
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            """image""": image,
            """source_prompt""": """a cat and a frog""",
            """target_prompt""": """a dog and a newt""",
            """generator""": generator,
            """num_inference_steps""": 2,
            """num_maps_per_mask""": 2,
            """mask_encode_strength""": 1.0,
            """guidance_scale""": 6.0,
            """output_type""": """numpy""",
        }
        return inputs
    def get_dummy_inversion_inputs( self , device , seed=0 ):
        '''simple docstring'''
        image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        image = Image.fromarray(np.uint8(image ) ).convert("""RGB""" )
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            """image""": image,
            """prompt""": """a cat and a frog""",
            """generator""": generator,
            """num_inference_steps""": 2,
            """inpaint_strength""": 1.0,
            """guidance_scale""": 6.0,
            """decode_latents""": True,
            """output_type""": """numpy""",
        }
        return inputs
    def test_save_load_optional_components( self ):
        '''simple docstring'''
        if not hasattr(self.pipeline_class , """_optional_components""" ):
            return
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        # set all optional components to None and update pipeline config accordingly
        for optional_component in pipe._optional_components:
            setattr(pipe , optional_component , None )
        pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
        inputs = self.get_dummy_inputs(torch_device )
        output = pipe(**inputs )[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir )
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir )
        pipe_loaded.to(torch_device )
        pipe_loaded.set_progress_bar_config(disable=None )
        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded , optional_component ) is None , F'''`{optional_component}` did not stay set to None after loading.''' , )
        inputs = self.get_dummy_inputs(torch_device )
        output_loaded = pipe_loaded(**inputs )[0]
        max_diff = np.abs(output - output_loaded ).max()
        self.assertLess(max_diff , 1e-4 )
    def test_mask( self ):
        '''simple docstring'''
        device = """cpu"""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_mask_inputs(device )
        mask = pipe.generate_mask(**inputs )
        mask_slice = mask[0, -3:, -3:]
        self.assertEqual(mask.shape , (1, 16, 16) )
        expected_slice = np.array([0] * 9 )
        max_diff = np.abs(mask_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(max_diff , 1e-3 )
        self.assertEqual(mask[0, -3, -4] , 0 )
    def test_inversion( self ):
        '''simple docstring'''
        device = """cpu"""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inversion_inputs(device )
        image = pipe.invert(**inputs ).images
        image_slice = image[0, -1, -3:, -3:]
        self.assertEqual(image.shape , (2, 32, 32, 3) )
        expected_slice = np.array(
            [0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.5_105, 0.5_015, 0.4_407, 0.4_799] , )
        max_diff = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(max_diff , 1e-3 )
    def test_inference_batch_single_identical( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=5e-3 )
    def test_inversion_dpm( self ):
        '''simple docstring'''
        device = """cpu"""
        components = self.get_dummy_components()
        scheduler_args = {"""beta_start""": 0.00_085, """beta_end""": 0.012, """beta_schedule""": """scaled_linear"""}
        components["""scheduler"""] = DPMSolverMultistepScheduler(**scheduler_args )
        components["""inverse_scheduler"""] = DPMSolverMultistepInverseScheduler(**scheduler_args )
        pipe = self.pipeline_class(**components )
        pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inversion_inputs(device )
        image = pipe.invert(**inputs ).images
        image_slice = image[0, -1, -3:, -3:]
        self.assertEqual(image.shape , (2, 32, 32, 3) )
        expected_slice = np.array(
            [0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.5_105, 0.5_015, 0.4_407, 0.4_799] , )
        max_diff = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(max_diff , 1e-3 )
@require_torch_gpu
@slow
class UpperCamelCase__ ( unittest.TestCase ):
    def tearDown( self ):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @classmethod
    def setUpClass( cls ):
        '''simple docstring'''
        raw_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png""" )
        raw_image = raw_image.convert("""RGB""" ).resize((768, 768) )
        cls.raw_image = raw_image
    def test_stable_diffusion_diffedit_full( self ):
        '''simple docstring'''
        generator = torch.manual_seed(0 )
        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            """stabilityai/stable-diffusion-2-1""" , safety_checker=None , torch_dtype=torch.float16 )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config )
        pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None )
        source_prompt = """a bowl of fruit"""
        target_prompt = """a bowl of pears"""
        mask_image = pipe.generate_mask(
            image=self.raw_image , source_prompt=source_prompt , target_prompt=target_prompt , generator=generator , )
        inv_latents = pipe.invert(
            prompt=source_prompt , image=self.raw_image , inpaint_strength=0.7 , generator=generator ).latents
        image = pipe(
            prompt=target_prompt , mask_image=mask_image , image_latents=inv_latents , generator=generator , negative_prompt=source_prompt , inpaint_strength=0.7 , output_type="""numpy""" , ).images[0]
        expected_image = (
            np.array(
                load_image(
                    """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
                    """/diffedit/pears.png""" ).resize((768, 768) ) )
            / 255
        )
        assert np.abs((expected_image - image).max() ) < 5e-1
    def test_stable_diffusion_diffedit_dpm( self ):
        '''simple docstring'''
        generator = torch.manual_seed(0 )
        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            """stabilityai/stable-diffusion-2-1""" , safety_checker=None , torch_dtype=torch.float16 )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe.inverse_scheduler = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None )
        source_prompt = """a bowl of fruit"""
        target_prompt = """a bowl of pears"""
        mask_image = pipe.generate_mask(
            image=self.raw_image , source_prompt=source_prompt , target_prompt=target_prompt , generator=generator , )
        inv_latents = pipe.invert(
            prompt=source_prompt , image=self.raw_image , inpaint_strength=0.7 , generator=generator , num_inference_steps=25 , ).latents
        image = pipe(
            prompt=target_prompt , mask_image=mask_image , image_latents=inv_latents , generator=generator , negative_prompt=source_prompt , inpaint_strength=0.7 , num_inference_steps=25 , output_type="""numpy""" , ).images[0]
        expected_image = (
            np.array(
                load_image(
                    """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
                    """/diffedit/pears.png""" ).resize((768, 768) ) )
            / 255
        )
        assert np.abs((expected_image - image).max() ) < 5e-1
| 650
| 0
|
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = '▁'
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class UpperCamelCase__ ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = BigBirdTokenizer
    rust_tokenizer_class = BigBirdTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self ):
        '''simple docstring'''
        super().setUp()
        tokenizer = self.tokenizer_class(SAMPLE_VOCAB , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id( self ):
        '''simple docstring'''
        token = """<s>"""
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab( self ):
        '''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , """<unk>""" )
        self.assertEqual(vocab_keys[1] , """<s>""" )
        self.assertEqual(vocab_keys[-1] , """[MASK]""" )
        self.assertEqual(len(vocab_keys ) , 1_004 )
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1_000 )
    def test_rust_and_python_full_tokenizers( self ):
        '''simple docstring'''
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = """I was born in 92000, and this is falsé."""
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
    def test_full_tokenizer( self ):
        '''simple docstring'''
        tokenizer = BigBirdTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize("""This is a test""" )
        self.assertListEqual(tokens , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [285, 46, 10, 170, 382] , )
        tokens = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
    def big_tokenizer( self ):
        '''simple docstring'''
        return BigBirdTokenizer.from_pretrained("""google/bigbird-roberta-base""" )
@slow
    def test_tokenization_base_easy_symbols( self ):
        '''simple docstring'''
        symbols = """Hello World!"""
        original_tokenizer_encodings = [65, 18_536, 2_260, 101, 66]
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@slow
    def test_tokenization_base_hard_symbols( self ):
        '''simple docstring'''
        symbols = (
            """This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
            """ add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
        )
# fmt: off
        original_tokenizer_encodings = [65, 871, 419, 358, 946, 991, 2_521, 452, 358, 1_357, 387, 7_751, 3_536, 112, 985, 456, 126, 865, 938, 5_400, 5_734, 458, 1_368, 467, 786, 2_462, 5_246, 1_159, 633, 865, 4_519, 457, 582, 852, 2_557, 427, 916, 508, 405, 34_324, 497, 391, 408, 11_342, 1_244, 385, 100, 938, 985, 456, 574, 362, 12_597, 3_200, 3_129, 1_172, 66] # noqa: E231
# fmt: on
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@require_torch
@slow
    def test_torch_encode_plus_sent_to_model( self ):
        '''simple docstring'''
        import torch
        from transformers import BigBirdConfig, BigBirdModel
        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys() )[:10]
        sequence = """ """.join(first_ten_tokens )
        encoded_sequence = self.big_tokenizer.encode_plus(sequence , return_tensors="""pt""" , return_token_type_ids=False )
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + """ """ + sequence] , return_tensors="""pt""" , return_token_type_ids=False )
        config = BigBirdConfig(attention_type="""original_full""" )
        model = BigBirdModel(config )
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence )
            model(**batch_encoded_sequence )
@slow
    def test_special_tokens( self ):
        '''simple docstring'''
        tokenizer = BigBirdTokenizer.from_pretrained("""google/bigbird-roberta-base""" )
        decoded_text = tokenizer.decode(tokenizer("""Paris is the [MASK].""" ).input_ids )
        self.assertTrue(decoded_text == """[CLS] Paris is the[MASK].[SEP]""" )
@slow
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ = {"""input_ids""": [[65, 39_286, 458, 36_335, 2_001, 456, 13_073, 13_266, 455, 113, 7_746, 1_741, 11_157, 391, 13_073, 13_266, 455, 113, 3_967, 35_412, 113, 4_936, 109, 3_870, 2_377, 113, 30_084, 45_720, 458, 134, 17_496, 112, 503, 11_672, 113, 118, 112, 5_665, 13_347, 38_687, 112, 1_496, 31_389, 112, 3_268, 47_264, 134, 962, 112, 16_377, 8_035, 23_130, 430, 12_169, 15_518, 28_592, 458, 146, 41_697, 109, 391, 12_169, 15_518, 16_689, 458, 146, 41_358, 109, 452, 726, 4_034, 111, 763, 35_412, 5_082, 388, 1_903, 111, 9_051, 391, 2_870, 48_918, 1_900, 1_123, 550, 998, 112, 9_586, 15_985, 455, 391, 410, 22_955, 37_636, 114, 66], [65, 448, 17_496, 419, 3_663, 385, 763, 113, 27_533, 2_870, 3_283, 13_043, 1_639, 24_713, 523, 656, 24_013, 18_550, 2_521, 517, 27_014, 21_244, 420, 1_212, 1_465, 391, 927, 4_833, 388, 578, 11_786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2_169, 7_687, 21_932, 18_146, 726, 363, 17_032, 3_391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase__ , model_name="""google/bigbird-roberta-base""" , revision="""215c99f1600e06f83acce68422f2035b2b5c3510""" , )
| 701
|
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
a = logging.get_logger(__name__)
class UpperCamelCase__ ( BaseImageProcessor ):
    model_input_names = ['pixel_values']
    def __init__( self , do_rescale : bool = True , rescale_factor : Union[int, float] = 1 / 255 , do_pad : bool = True , pad_size : int = 8 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size
    def rescale( self , image : np.ndarray , scale : float , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs ):
        '''simple docstring'''
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def pad( self , image : np.ndarray , size : int , data_format : Optional[Union[str, ChannelDimension]] = None ):
        '''simple docstring'''
        old_height , old_width = get_image_size(image )
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image , ((0, pad_height), (0, pad_width)) , mode="""symmetric""" , data_format=data_format )
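    # Worked example (illustrative only): with the default pad_size of 8, a 30x35 image gets
    # symmetric padding of (0, 2) rows and (0, 5) columns to reach 32x40. Note the formula
    # above always pads to the *next* multiple, so an already-aligned 32-pixel side still
    # gains a full extra block of 8.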
    def preprocess( self , images : ImageInput , do_rescale : Optional[bool] = None , rescale_factor : Optional[float] = None , do_pad : Optional[bool] = None , pad_size : Optional[int] = None , return_tensors : Optional[Union[str, TensorType]] = None , data_format : Union[str, ChannelDimension] = ChannelDimension.FIRST , **kwargs , ):
        '''simple docstring'''
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_pad:
            images = [self.pad(image , size=pad_size ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"""pixel_values""": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
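    # Minimal usage sketch (names assumed for illustration, not part of this file):
    #   processor = UpperCamelCase__(do_rescale=True, do_pad=True, pad_size=8)
    #   batch = processor(images=[pil_image], return_tensors="np")
    #   batch["pixel_values"][0].shape  # (3, H_padded, W_padded), channels first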
| 650
| 0
|
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCamelCase__ ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp( self ):
        '''simple docstring'''
        super().setUp()
        # fmt: off
        vocab = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
        # fmt: on
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + """\n""" )
    def get_tokenizer( self , **kwargs ):
        '''simple docstring'''
        return MgpstrTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        '''simple docstring'''
        input_text = """tester"""
        output_text = """tester"""
        return input_text, output_text
@unittest.skip("""MGP-STR always lower cases letters.""" )
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
pass
    def test_add_special_tokens( self ):
        '''simple docstring'''
        tokenizers = self.get_tokenizers(do_lower_case=False )
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                special_token = """[SPECIAL_TOKEN]"""
                tokenizer.add_special_tokens({"""cls_token""": special_token} )
                encoded_special_token = tokenizer.encode([special_token] , add_special_tokens=False )
                self.assertEqual(len(encoded_special_token ) , 1 )
                decoded = tokenizer.decode(encoded_special_token , skip_special_tokens=True )
                self.assertTrue(special_token not in decoded )
    def test_internal_consistency( self ):
        '''simple docstring'''
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                input_text , output_text = self.get_input_output_texts(tokenizer )
                tokens = tokenizer.tokenize(input_text )
                ids = tokenizer.convert_tokens_to_ids(tokens )
                ids_a = tokenizer.encode(input_text , add_special_tokens=False )
                self.assertListEqual(ids , ids_a )
                tokens_a = tokenizer.convert_ids_to_tokens(ids )
                self.assertNotEqual(len(tokens_a ) , 0 )
                text_a = tokenizer.decode(ids )
                self.assertIsInstance(text_a , str )
                self.assertEqual(text_a.replace(""" """ , """""" ) , output_text )
@unittest.skip("""MGP-STR tokenizer only handles one sequence.""" )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
pass
@unittest.skip("""inputs cannot be pretokenized in MgpstrTokenizer""" )
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
pass
| 702
|
def UpperCAmelCase_ ( number ):
    if not isinstance(number , int ):
        raise TypeError("""Input value must be an 'int' type""" )
    position = 0
    while number:
        position += 1
        number >>= 1
    return position
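# The loop above counts how many right-shifts it takes to consume the number, i.e. the
# 1-indexed position of the most significant set bit (0 for input 0). For example:
#   UpperCAmelCase_(1) == 1, UpperCAmelCase_(8) == 4 (0b1000), UpperCAmelCase_(255) == 8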
if __name__ == "__main__":
import doctest
doctest.testmod()
| 650
| 0
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config( model_name ):
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1_024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1_536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1_024, 2_048]
        auxiliary_in_channels = 1_024
    # set label information
    num_labels = 150
    repo_id = '''huggingface/label-files'''
    filename = '''ade20k-id2label.json'''
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    backbone_config = ConvNextConfig(
        depths=depths , hidden_sizes=hidden_sizes , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
    config = UperNetConfig(
        backbone_config=backbone_config , auxiliary_in_channels=auxiliary_in_channels , num_labels=num_labels , id2label=id2label , label2id=label2id , )
    return config
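# Illustrative mapping (derived from the branches above): "upernet-convnext-base" yields
# depths [3, 3, 27, 3], hidden sizes [128, 256, 512, 1024] and auxiliary_in_channels 512,
# so the auxiliary FCN head reads the third backbone stage.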
def create_rename_keys( config ):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(("""backbone.downsample_layers.0.0.weight""", """backbone.embeddings.patch_embeddings.weight""") )
rename_keys.append(("""backbone.downsample_layers.0.0.bias""", """backbone.embeddings.patch_embeddings.bias""") )
rename_keys.append(("""backbone.downsample_layers.0.1.weight""", """backbone.embeddings.layernorm.weight""") )
rename_keys.append(("""backbone.downsample_layers.0.1.bias""", """backbone.embeddings.layernorm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'''backbone.stages.{i}.{j}.gamma''', F'''backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.depthwise_conv.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.dwconv.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.depthwise_conv.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.dwconv.bias''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.norm.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.layernorm.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.norm.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.layernorm.bias''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv1.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv1.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv2.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv2.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias''') )
if i > 0:
rename_keys.append((F'''backbone.downsample_layers.{i}.0.weight''', F'''backbone.encoder.stages.{i}.downsampling_layer.0.weight''') )
rename_keys.append((F'''backbone.downsample_layers.{i}.0.bias''', F'''backbone.encoder.stages.{i}.downsampling_layer.0.bias''') )
rename_keys.append((F'''backbone.downsample_layers.{i}.1.weight''', F'''backbone.encoder.stages.{i}.downsampling_layer.1.weight''') )
rename_keys.append((F'''backbone.downsample_layers.{i}.1.bias''', F'''backbone.encoder.stages.{i}.downsampling_layer.1.bias''') )
rename_keys.append((F'''backbone.norm{i}.weight''', F'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((F'''backbone.norm{i}.bias''', F'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""),
("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""),
("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""),
("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""),
] )
# fmt: on
return rename_keys
def rename_key( dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
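# Usage sketch (key names illustrative): rename_key(state_dict, "backbone.norm0.weight",
# "backbone.hidden_states_norms.stage1.weight") pops the old entry and re-inserts its
# tensor under the new HF-style key, in place.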
def convert_upernet_checkpoint( model_name , pytorch_dump_folder_path , push_to_hub ):
    model_name_to_url = {
        '''upernet-convnext-tiny''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth''',
        '''upernet-convnext-small''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth''',
        '''upernet-convnext-base''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth''',
        '''upernet-convnext-large''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth''',
        '''upernet-convnext-xlarge''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth''',
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="""cpu""" )['''state_dict''']
    config = get_upernet_config(model_name )
    model = UperNetForSemanticSegmentation(config )
    model.eval()
    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        if "bn" in key:
            key = key.replace("""bn""" , """batch_norm""" )
        state_dict[key] = val
    # rename keys
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    model.load_state_dict(state_dict )
    # verify on image
    url = '''https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'''
    image = Image.open(requests.get(url , stream=True ).raw ).convert("""RGB""" )
    processor = SegformerImageProcessor()
    pixel_values = processor(image , return_tensors="""pt""" ).pixel_values
    with torch.no_grad():
        outputs = model(pixel_values )
    if model_name == "upernet-convnext-tiny":
        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] )
    elif model_name == "upernet-convnext-small":
        expected_slice = torch.tensor(
            [[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]] )
    elif model_name == "upernet-convnext-base":
        expected_slice = torch.tensor(
            [[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]] )
    elif model_name == "upernet-convnext-large":
        expected_slice = torch.tensor(
            [[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]] )
    elif model_name == "upernet-convnext-xlarge":
        expected_slice = torch.tensor(
            [[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]] )
    print("""Logits:""" , outputs.logits[0, 0, :3, :3] )
    assert torch.allclose(outputs.logits[0, 0, :3, :3] , expected_slice , atol=1e-4 )
    print("""Looks ok!""" )
    if pytorch_dump_folder_path is not None:
        print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(pytorch_dump_folder_path )
        print(F'''Saving processor to {pytorch_dump_folder_path}''' )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print(F'''Pushing model and processor for {model_name} to hub''' )
        model.push_to_hub(F'''openmmlab/{model_name}''' )
        processor.push_to_hub(F'''openmmlab/{model_name}''' )
if __name__ == "__main__":
a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='upernet-convnext-tiny',
type=str,
choices=[F'''upernet-convnext-{size}''' for size in ['tiny', 'small', 'base', 'large', 'xlarge']],
help='Name of the ConvNext UperNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
a = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 703
|
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UpperCamelCase__ ( ModelMixin , ConfigMixin , ModuleUtilsMixin ):
    @register_to_config
    def __init__( self , max_length : int , vocab_size : int , d_model : int , dropout_rate : float , num_layers : int , num_heads : int , d_kv : int , d_ff : int , feed_forward_proj : str , is_decoder : bool = False , ):
        '''simple docstring'''
        super().__init__()
        self.token_embedder = nn.Embedding(vocab_size , d_model )
        self.position_encoding = nn.Embedding(max_length , d_model )
        self.position_encoding.weight.requires_grad = False
        self.dropout_pre = nn.Dropout(p=dropout_rate )
        t5config = T5Config(
            vocab_size=vocab_size , d_model=d_model , num_heads=num_heads , d_kv=d_kv , d_ff=d_ff , dropout_rate=dropout_rate , feed_forward_proj=feed_forward_proj , is_decoder=is_decoder , is_encoder_decoder=False , )
        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers ):
            lyr = T5Block(t5config )
            self.encoders.append(lyr )
        self.layer_norm = T5LayerNorm(d_model )
        self.dropout_post = nn.Dropout(p=dropout_rate )
    def forward( self , encoder_input_tokens , encoder_inputs_mask ):
        '''simple docstring'''
        x = self.token_embedder(encoder_input_tokens )
        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length , device=encoder_input_tokens.device )
        x += self.position_encoding(inputs_positions )
        x = self.dropout_pre(x )
        # invert the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask , input_shape )
        for lyr in self.encoders:
            x = lyr(x , extended_attention_mask )[0]
        x = self.layer_norm(x )
        return self.dropout_post(x ), encoder_inputs_mask
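# Shape sketch (assumed typical usage, not asserted in this file): encoder_input_tokens
# is an integer tensor of shape (batch, seq); the returned hidden states have shape
# (batch, seq, d_model), with the (batch, seq) boolean mask passed through unchanged for
# downstream cross-attention.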
| 650
| 0
|
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    model_name_or_path: Optional[str] = field(
        default=None , metadata={
            'help': (
                'The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.'
            )
        } , )
    model_type: Optional[str] = field(
        default=None , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(MODEL_TYPES )} , )
    config_overrides: Optional[str] = field(
        default=None , metadata={
            'help': (
                'Override some existing default config settings when a model is trained from scratch. Example: '
                'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'
            )
        } , )
    config_name: Optional[str] = field(
        default=None , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
    cache_dir: Optional[str] = field(
        default=None , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
    use_fast_tokenizer: bool = field(
        default=True , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
    model_revision: str = field(
        default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
    use_auth_token: bool = field(
        default=False , metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        } , )
    def __post_init__( self ):
        '''simple docstring'''
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                """--config_overrides can't be used in combination with --config_name or --model_name_or_path""" )
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default=None , metadata={'help': 'The name of the dataset to use (via the datasets library).'} )
    dataset_config_name: Optional[str] = field(
        default=None , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
    train_file: Optional[str] = field(default=None , metadata={'help': 'The input training data file (a text file).'} )
    validation_file: Optional[str] = field(
        default=None , metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'} , )
    train_ref_file: Optional[str] = field(
        default=None , metadata={'help': 'An optional input train ref data file for whole word masking in Chinese.'} , )
    validation_ref_file: Optional[str] = field(
        default=None , metadata={'help': 'An optional input validation ref data file for whole word masking in Chinese.'} , )
    overwrite_cache: bool = field(
        default=False , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
    validation_split_percentage: Optional[int] = field(
        default=5 , metadata={
            'help': 'The percentage of the train set used as validation set in case there\'s no validation split'
        } , )
    max_seq_length: Optional[int] = field(
        default=None , metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated. Default to the max input length of the model.'
            )
        } , )
    preprocessing_num_workers: Optional[int] = field(
        default=None , metadata={'help': 'The number of processes to use for the preprocessing.'} , )
    mlm_probability: float = field(
        default=0.1_5 , metadata={'help': 'Ratio of tokens to mask for masked language modeling loss'} )
    pad_to_max_length: bool = field(
        default=False , metadata={
            'help': (
                'Whether to pad all samples to `max_seq_length`. '
                'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
            )
        } , )
    def __post_init__( self ):
        '''simple docstring'''
        if self.train_file is not None:
            extension = self.train_file.split(""".""" )[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split(""".""" )[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references( dataset , ref_file ):
    with open(ref_file , """r""" , encoding="""utf-8""" ) as f:
        refs = [json.loads(line ) for line in f.read().splitlines() if (len(line ) > 0 and not line.isspace())]
    assert len(dataset ) == len(refs )
    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["""chinese_ref"""] = refs
    return Dataset.from_dict(dataset_dict )
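# Each line of the ref file is a JSON list marking sub-token indices that belong to the
# same Chinese whole word, e.g. a line like [2, 3] tells DataCollatorForWholeWordMask to
# mask tokens 2 and 3 together. (Example values illustrative only.)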
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
        if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            raise ValueError(
                F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
                """Use --overwrite_output_dir to overcome.""" )
        elif last_checkpoint is not None:
            logger.info(
                F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
                """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info("""Training/evaluation parameters %s""" , training_args )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name , data_args.dataset_config_name )
        if "validation" not in datasets.keys():
            datasets["""validation"""] = load_dataset(
                data_args.dataset_name , data_args.dataset_config_name , split=F'''train[:{data_args.validation_split_percentage}%]''' , )
            datasets["""train"""] = load_dataset(
                data_args.dataset_name , data_args.dataset_config_name , split=F'''train[{data_args.validation_split_percentage}%:]''' , )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["""train"""] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["""validation"""] = data_args.validation_file
        extension = data_args.train_file.split(""".""" )[-1]
        if extension == "txt":
            extension = """text"""
        datasets = load_dataset(extension , data_files=data_files )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        """cache_dir""": model_args.cache_dir,
        """revision""": model_args.model_revision,
        """use_auth_token""": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name , **config_kwargs )
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path , **config_kwargs )
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("""You are instantiating a new config instance from scratch.""" )
    if model_args.config_overrides is not None:
        logger.info(F'''Overriding config: {model_args.config_overrides}''' )
        config.update_from_string(model_args.config_overrides )
        logger.info(F'''New config: {config}''' )
    tokenizer_kwargs = {
        """cache_dir""": model_args.cache_dir,
        """use_fast""": model_args.use_fast_tokenizer,
        """revision""": model_args.model_revision,
        """use_auth_token""": True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **tokenizer_kwargs )
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **tokenizer_kwargs )
    else:
        raise ValueError(
            """You are instantiating a new tokenizer from scratch. This is not supported by this script."""
            """You can do it from another script, save it, and load it from here, using --tokenizer_name.""" )
    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        logger.info("""Training new model from scratch""" )
        model = AutoModelForMaskedLM.from_config(config )
    model.resize_token_embeddings(len(tokenizer ) )
# Preprocessing the datasets.
# First we tokenize all the texts.
    if training_args.do_train:
        column_names = datasets["""train"""].column_names
    else:
        column_names = datasets["""validation"""].column_names
    text_column_name = """text""" if """text""" in column_names else column_names[0]
    padding = """max_length""" if data_args.pad_to_max_length else False
    def tokenize_function(examples ):
        # Remove empty lines
        examples["""text"""] = [line for line in examples["""text"""] if len(line ) > 0 and not line.isspace()]
        return tokenizer(examples["""text"""] , padding=padding , truncation=True , max_length=data_args.max_seq_length )
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , )
# Add the chinese references if provided
    if data_args.train_ref_file is not None:
        tokenized_datasets["""train"""] = add_chinese_references(tokenized_datasets["""train"""] , data_args.train_ref_file )
    if data_args.validation_ref_file is not None:
        tokenized_datasets["""validation"""] = add_chinese_references(
            tokenized_datasets["""validation"""] , data_args.validation_ref_file )
    # If we have ref files, need to avoid it removed by trainer
    has_ref = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        training_args.remove_unused_columns = False
    # Data collator
    # This one will take care of randomly masking the tokens.
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer , mlm_probability=data_args.mlm_probability )
    # Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=tokenized_datasets["""train"""] if training_args.do_train else None , eval_dataset=tokenized_datasets["""validation"""] if training_args.do_eval else None , tokenizer=tokenizer , data_collator=data_collator , )
# Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        trainer.save_model()  # Saves the tokenizer too for easy upload
        output_train_file = os.path.join(training_args.output_dir , """train_results.txt""" )
        if trainer.is_world_process_zero():
            with open(output_train_file , """w""" ) as writer:
                logger.info("""***** Train results *****""" )
                for key, value in sorted(train_result.metrics.items() ):
                    logger.info(F''' {key} = {value}''' )
                    writer.write(F'''{key} = {value}\n''' )
            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir , """trainer_state.json""" ) )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("""*** Evaluate ***""" )
        eval_output = trainer.evaluate()
        perplexity = math.exp(eval_output["""eval_loss"""] )
        results["""perplexity"""] = perplexity
        output_eval_file = os.path.join(training_args.output_dir , """eval_results_mlm_wwm.txt""" )
        if trainer.is_world_process_zero():
            with open(output_eval_file , """w""" ) as writer:
                logger.info("""***** Eval results *****""" )
                for key, value in sorted(results.items() ):
                    logger.info(F''' {key} = {value}''' )
                    writer.write(F'''{key} = {value}\n''' )
    return results
def _mp_fn( index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 704
|
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar('T')
class LRUCache( Generic[T] ):
    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache
    def __init__( self , n : int ):
        '''simple docstring'''
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("""n should be an integer greater than 0.""" )
        else:
            LRUCache._MAX_CAPACITY = n
    def refer( self , x : T ):
        '''simple docstring'''
        if x not in self.key_reference:
            if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element )
        else:
            self.dq_store.remove(x )
        self.dq_store.appendleft(x )
        self.key_reference.add(x )
    def display( self ):
        '''simple docstring'''
        for k in self.dq_store:
            print(k )
    def __repr__( self ):
        '''simple docstring'''
        return F'''LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}'''
if __name__ == "__main__":
import doctest
doctest.testmod()
    lru_cache = LRUCache(4)
lru_cache.refer('A')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('A')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
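    # Walk-through of the demo above: after refer('A'), 2, 3 the deque is [3, 2, 'A'];
    # re-referring 'A' moves it to the front (['A', 3, 2]); adding 4 and 5 overflows the
    # capacity of 4, evicting the least-recently-used key 2 -> [5, 4, 'A', 3].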
| 650
| 0
|
def UpperCAmelCase_ ( a , b ):
    n = len(a )
    m = len(b )
    dp = [[False for _ in range(m + 1 )] for _ in range(n + 1 )]
    dp[0][0] = True
    for i in range(n ):
        for j in range(m + 1 ):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
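# Example (the classic "abbreviation" problem): UpperCAmelCase_("daBcd", "ABC") is True,
# since capitalizing 'a' and 'c' and deleting the lowercase 'd's turns "daBcd" into "ABC".
# dp[i][j] records whether the first i characters of a can be abbreviated to the first j of b.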
if __name__ == "__main__":
import doctest
doctest.testmod()
| 705
|
def UpperCAmelCase_ ( input_str ):
    return " ".join(input_str.split()[::-1] )
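# Example: UpperCAmelCase_("Hello   World") returns "World Hello"; split() collapses any
# run of whitespace, so extra inner spaces are not preserved.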
if __name__ == "__main__":
import doctest
doctest.testmod()
| 650
| 0
|
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase__ ( SchedulerCommonTest ):
    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10
    def get_scheduler_config( self , **kwargs ):
        '''simple docstring'''
        config = {
            """num_train_timesteps""": 1_100,
            """beta_start""": 0.0_001,
            """beta_end""": 0.02,
            """beta_schedule""": """linear""",
        }
        config.update(**kwargs )
        return config
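    # Usage sketch: get_scheduler_config(beta_schedule="scaled_linear") returns the base
    # dict above with the given overrides applied, ready for scheduler_class(**config).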
    def test_timesteps( self ):
        '''simple docstring'''
        for timesteps in [10, 50, 100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def test_betas( self ):
        '''simple docstring'''
        for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end )
    def test_schedules( self ):
        '''simple docstring'''
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule )
    def test_prediction_type( self ):
        '''simple docstring'''
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
    def test_full_loop_no_noise( self ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps )
        generator = torch.manual_seed(0 )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device )
        for i, t in enumerate(scheduler.timesteps ):
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample , generator=generator )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 10.0_807 ) < 1e-2
        assert abs(result_mean.item() - 0.0_131 ) < 1e-3
    def test_full_loop_with_v_prediction( self ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="""v_prediction""" )
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps )
        generator = torch.manual_seed(0 )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device )
        for i, t in enumerate(scheduler.timesteps ):
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample , generator=generator )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 0.0_002 ) < 1e-2
        assert abs(result_mean.item() - 2.2676e-06 ) < 1e-3
    def test_full_loop_device( self ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps , device=torch_device )
        generator = torch.manual_seed(0 )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device )
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample , generator=generator )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 10.0_807 ) < 1e-2
        assert abs(result_mean.item() - 0.0_131 ) < 1e-3
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase_ = self.scheduler_classes[0]
lowercase_ = self.get_scheduler_config()
lowercase_ = scheduler_class(**UpperCamelCase__ , use_karras_sigmas=UpperCamelCase__ )
scheduler.set_timesteps(self.num_inference_steps , device=UpperCamelCase__ )
lowercase_ = torch.manual_seed(0 )
lowercase_ = self.dummy_model()
lowercase_ = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
lowercase_ = sample.to(UpperCamelCase__ )
for t in scheduler.timesteps:
lowercase_ = scheduler.scale_model_input(UpperCamelCase__ , UpperCamelCase__ )
lowercase_ = model(UpperCamelCase__ , UpperCamelCase__ )
lowercase_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , generator=UpperCamelCase__ )
lowercase_ = output.prev_sample
lowercase_ = torch.sum(torch.abs(UpperCamelCase__ ) )
lowercase_ = torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_sum.item() - 124.52_299_499_511_719 ) < 1e-2
assert abs(result_mean.item() - 0.16_213_932_633_399_963 ) < 1e-3
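    # The tests above all exercise the same denoising loop. A minimal sketch of
    # that pattern, hedged: `HeunDiscreteScheduler` is used purely as an
    # illustrative scheduler class, not necessarily the one under test here.
    #
    #     from diffusers import HeunDiscreteScheduler
    #     scheduler = HeunDiscreteScheduler(num_train_timesteps=1_000)
    #     scheduler.set_timesteps(10)
    #     sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
    #     for t in scheduler.timesteps:
    #         model_input = scheduler.scale_model_input(sample, t)
    #         noise_pred = torch.zeros_like(model_input)  # stand-in for a UNet call
    #         sample = scheduler.step(noise_pred, t, sample).prev_sample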
def solution(limit=28_123):
    # sum_divs[n] accumulates the sum of the proper divisors of n (sieve style).
    sum_divs = [1] * (limit + 1)

    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i

    abundants = set()
    res = 0

    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)

        if not any((n - a in abundants) for a in abundants):
            res += n

    return res


if __name__ == "__main__":
    print(solution())
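    # For reference: with the default limit of 28_123 this solves Project Euler
    # problem 23; the expected output (hedged, from the published problem) is
    # 4179871, i.e. `solution() == 4179871`.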
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
a = logging.get_logger(__name__)
a = {
'shi-labs/nat-mini-in1k-224': 'https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json',
# See all Nat models at https://huggingface.co/models?filter=nat
}
class UpperCamelCase__(BackboneConfigMixin, PretrainedConfig):
    model_type = "nat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
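# Hedged usage sketch: with the defaults above, the derived channel dimension
# after the last stage is 64 * 2 ** (len(depths) - 1) = 512, e.g.
#
#     config = UpperCamelCase__()   # the config class defined above
#     assert config.hidden_size == 512
#     assert config.stage_names[0] == "stem"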
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        '''simple docstring'''
        return OpenLlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            use_stable_embedding=True,
        )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def UpperCAmelCase__ ( self : Union[str, Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] , ):
'''simple docstring'''
lowercase_ = True
lowercase_ = OpenLlamaModel(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowercase_ = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , )
lowercase_ = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , )
lowercase_ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase__ ( self : Any , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : str , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Any , UpperCamelCase__ : List[Any] , ):
'''simple docstring'''
lowercase_ = OpenLlamaForCausalLM(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowercase_ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase__ ( self : int , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : List[str] , UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : Tuple , ):
'''simple docstring'''
lowercase_ = True
lowercase_ = True
lowercase_ = OpenLlamaForCausalLM(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
# first forward pass
lowercase_ = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , use_cache=UpperCamelCase__ , )
lowercase_ = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowercase_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowercase_ = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
lowercase_ = torch.cat([input_ids, next_tokens] , dim=-1 )
lowercase_ = torch.cat([input_mask, next_mask] , dim=-1 )
lowercase_ = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , output_hidden_states=UpperCamelCase__ , )["""hidden_states"""][0]
lowercase_ = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , past_key_values=UpperCamelCase__ , output_hidden_states=UpperCamelCase__ , )["""hidden_states"""][0]
# select random slice
lowercase_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowercase_ = output_from_no_past[:, -3:, random_slice_idx].detach()
lowercase_ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3 ) )
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class UpperCamelCase__(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenLlamaModel,
            "text-classification": OpenLlamaForSequenceClassification,
            "text-generation": OpenLlamaForCausalLM,
            "zero-shot": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp(self):
        '''simple docstring'''
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)

    def test_config(self):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def test_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowercase_ = type
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ = 3
lowercase_ = input_dict["""input_ids"""]
lowercase_ = input_ids.ne(1 ).to(UpperCamelCase__ )
lowercase_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowercase_ = OpenLlamaForSequenceClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowercase_ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ = 3
lowercase_ = """single_label_classification"""
lowercase_ = input_dict["""input_ids"""]
lowercase_ = input_ids.ne(1 ).to(UpperCamelCase__ )
lowercase_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowercase_ = OpenLlamaForSequenceClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowercase_ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ = 3
lowercase_ = """multi_label_classification"""
lowercase_ = input_dict["""input_ids"""]
lowercase_ = input_ids.ne(1 ).to(UpperCamelCase__ )
lowercase_ = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
lowercase_ = OpenLlamaForSequenceClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowercase_ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("""Open-Llama buffers include complex numbers, which breaks this test""" )
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def UpperCAmelCase__ ( self : Tuple , UpperCamelCase__ : Union[str, Any] ):
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ = ids_tensor([1, 10] , config.vocab_size )
lowercase_ = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowercase_ = OpenLlamaModel(UpperCamelCase__ )
original_model.to(UpperCamelCase__ )
original_model.eval()
lowercase_ = original_model(UpperCamelCase__ ).last_hidden_state
lowercase_ = original_model(UpperCamelCase__ ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowercase_ = {"""type""": scaling_type, """factor""": 10.0}
lowercase_ = OpenLlamaModel(UpperCamelCase__ )
scaled_model.to(UpperCamelCase__ )
scaled_model.eval()
lowercase_ = scaled_model(UpperCamelCase__ ).last_hidden_state
lowercase_ = scaled_model(UpperCamelCase__ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-5 ) )
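    # The scaling test above assumes the config exposes a RoPE-scaling dict of
    # the form {"type": "linear" | "dynamic", "factor": float} (hedged sketch;
    # the attribute name `rope_scaling` is an inference from similar models):
    #
    #     config.rope_scaling = {"type": "linear", "factor": 10.0}
    #     model = OpenLlamaModel(config)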
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0
class UpperCamelCase__ ( unittest.TestCase ):
    def test_kwargs_handler(self):
        '''simple docstring'''
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})
@require_cuda
    def test_grad_scaler_kwargs(self):
        '''simple docstring'''
        scaler_handler = GradScalerKwargs(init_scale=1_024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler

        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1_024.0)
        self.assertEqual(scaler._growth_factor, 2.0)

        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2_000)
        self.assertEqual(scaler._enabled, True)
@require_multi_gpu
    def test_ddp_kwargs(self):
        '''simple docstring'''
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)

    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
    if observed_bucket_cap_map != 15:
        error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
    if model.find_unused_parameters is not True:
        error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"

    # Check the values of the defaults
    if model.dim != 0:
        error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
    if model.broadcast_buffers is not True:
        error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
    if model.gradient_as_bucket_view is not False:
        error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
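# The MockClass tests above illustrate the KwargsHandler contract: a dataclass
# inheriting from it gains `to_kwargs()`, which returns only the fields whose
# values differ from their defaults. A hedged sketch with a hypothetical handler:
#
#     @dataclass
#     class MyHandler(KwargsHandler):
#         bucket_cap_mb: int = 25
#
#     MyHandler().to_kwargs()                  # -> {}
#     MyHandler(bucket_cap_mb=15).to_kwargs()  # -> {"bucket_cap_mb": 15}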
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
a = False
a = logging.get_logger(__name__)
a = 'ybelkada/fonts'
def UpperCAmelCase_ ( ):
if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
raise ImportError(
F'''You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use '''
"""Pix2StructImageProcessor. Please upgrade torch.""" )
def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
requires_backends(UpperCAmelCase__ , ["""torch"""] )
_check_torch_version()
lowercase_ = image_tensor.unsqueeze(0 )
lowercase_ = torch.nn.functional.unfold(UpperCAmelCase__ , (patch_height, patch_width) , stride=(patch_height, patch_width) )
lowercase_ = patches.reshape(image_tensor.size(0 ) , image_tensor.size(1 ) , UpperCAmelCase__ , UpperCAmelCase__ , -1 )
lowercase_ = patches.permute(0 , 4 , 2 , 3 , 1 ).reshape(
image_tensor.size(2 ) // patch_height , image_tensor.size(3 ) // patch_width , image_tensor.size(1 ) * patch_height * patch_width , )
return patches.unsqueeze(0 )
def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ = 3_6 , UpperCAmelCase__ = "black" , UpperCAmelCase__ = "white" , UpperCAmelCase__ = 5 , UpperCAmelCase__ = 5 , UpperCAmelCase__ = 5 , UpperCAmelCase__ = 5 , UpperCAmelCase__ = None , UpperCAmelCase__ = None , ):
requires_backends(UpperCAmelCase__ , """vision""" )
# Add new lines so that each line is no more than 80 characters.
lowercase_ = textwrap.TextWrapper(width=8_0 )
lowercase_ = wrapper.wrap(text=UpperCAmelCase__ )
lowercase_ = """\n""".join(UpperCAmelCase__ )
if font_bytes is not None and font_path is None:
lowercase_ = io.BytesIO(UpperCAmelCase__ )
elif font_path is not None:
lowercase_ = font_path
else:
lowercase_ = hf_hub_download(UpperCAmelCase__ , """Arial.TTF""" )
lowercase_ = ImageFont.truetype(UpperCAmelCase__ , encoding="""UTF-8""" , size=UpperCAmelCase__ )
# Use a temporary canvas to determine the width and height in pixels when
# rendering the text.
lowercase_ = ImageDraw.Draw(Image.new("""RGB""" , (1, 1) , UpperCAmelCase__ ) )
lowercase_ , lowercase_ , lowercase_ , lowercase_ = temp_draw.textbbox((0, 0) , UpperCAmelCase__ , UpperCAmelCase__ )
# Create the actual image with a bit of padding around the text.
lowercase_ = text_width + left_padding + right_padding
lowercase_ = text_height + top_padding + bottom_padding
lowercase_ = Image.new("""RGB""" , (image_width, image_height) , UpperCAmelCase__ )
lowercase_ = ImageDraw.Draw(UpperCAmelCase__ )
draw.text(xy=(left_padding, top_padding) , text=UpperCAmelCase__ , fill=UpperCAmelCase__ , font=UpperCAmelCase__ )
return image
def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ):
requires_backends(UpperCAmelCase__ , """vision""" )
# Convert to PIL image if necessary
lowercase_ = to_pil_image(UpperCAmelCase__ )
lowercase_ = render_text(UpperCAmelCase__ , **UpperCAmelCase__ )
lowercase_ = max(header_image.width , image.width )
lowercase_ = int(image.height * (new_width / image.width) )
lowercase_ = int(header_image.height * (new_width / header_image.width) )
lowercase_ = Image.new("""RGB""" , (new_width, new_height + new_header_height) , """white""" )
new_image.paste(header_image.resize((new_width, new_header_height) ) , (0, 0) )
new_image.paste(image.resize((new_width, new_height) ) , (0, new_header_height) )
# Convert back to the original framework if necessary
lowercase_ = to_numpy_array(UpperCAmelCase__ )
if infer_channel_dimension_format(UpperCAmelCase__ ) == ChannelDimension.LAST:
lowercase_ = to_channel_dimension_format(UpperCAmelCase__ , ChannelDimension.LAST )
return new_image
class UpperCamelCase__(BaseImageProcessor):
    model_input_names = ["flattened_patches"]
def __init__( self : str , UpperCamelCase__ : bool = True , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : int = 2_048 , UpperCamelCase__ : bool = False , **UpperCamelCase__ : Optional[int] , ):
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
lowercase_ = patch_size if patch_size is not None else {"""height""": 16, """width""": 16}
lowercase_ = do_normalize
lowercase_ = do_convert_rgb
lowercase_ = max_patches
lowercase_ = is_vqa
def UpperCAmelCase__ ( self : Optional[Any] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : int , UpperCamelCase__ : dict , **UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
requires_backends(self.extract_flattened_patches , """torch""" )
_check_torch_version()
# convert to torch
lowercase_ = to_channel_dimension_format(UpperCamelCase__ , ChannelDimension.FIRST )
lowercase_ = torch.from_numpy(UpperCamelCase__ )
lowercase_ , lowercase_ = patch_size["""height"""], patch_size["""width"""]
lowercase_ , lowercase_ = get_image_size(UpperCamelCase__ )
# maximize scale s.t.
lowercase_ = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) )
lowercase_ = max(min(math.floor(scale * image_height / patch_height ) , UpperCamelCase__ ) , 1 )
lowercase_ = max(min(math.floor(scale * image_width / patch_width ) , UpperCamelCase__ ) , 1 )
lowercase_ = max(num_feasible_rows * patch_height , 1 )
lowercase_ = max(num_feasible_cols * patch_width , 1 )
lowercase_ = torch.nn.functional.interpolate(
image.unsqueeze(0 ) , size=(resized_height, resized_width) , mode="""bilinear""" , align_corners=UpperCamelCase__ , antialias=UpperCamelCase__ , ).squeeze(0 )
# [1, rows, columns, patch_height * patch_width * image_channels]
lowercase_ = torch_extract_patches(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
lowercase_ = patches.shape
lowercase_ = patches_shape[1]
lowercase_ = patches_shape[2]
lowercase_ = patches_shape[3]
# [rows * columns, patch_height * patch_width * image_channels]
lowercase_ = patches.reshape([rows * columns, depth] )
# [rows * columns, 1]
lowercase_ = torch.arange(UpperCamelCase__ ).reshape([rows, 1] ).repeat(1 , UpperCamelCase__ ).reshape([rows * columns, 1] )
lowercase_ = torch.arange(UpperCamelCase__ ).reshape([1, columns] ).repeat(UpperCamelCase__ , 1 ).reshape([rows * columns, 1] )
# Offset by 1 so the ids do not contain zeros, which represent padding.
row_ids += 1
col_ids += 1
# Prepare additional patch features.
# [rows * columns, 1]
lowercase_ = row_ids.to(torch.floataa )
lowercase_ = col_ids.to(torch.floataa )
# [rows * columns, 2 + patch_height * patch_width * image_channels]
lowercase_ = torch.cat([row_ids, col_ids, patches] , -1 )
# [max_patches, 2 + patch_height * patch_width * image_channels]
lowercase_ = torch.nn.functional.pad(UpperCamelCase__ , [0, 0, 0, max_patches - (rows * columns)] ).float()
lowercase_ = to_numpy_array(UpperCamelCase__ )
return result
def UpperCAmelCase__ ( self : List[Any] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Dict ):
'''simple docstring'''
if image.dtype == np.uinta:
lowercase_ = image.astype(np.floataa )
# take mean across the whole `image`
lowercase_ = np.mean(UpperCamelCase__ )
lowercase_ = np.std(UpperCamelCase__ )
lowercase_ = max(UpperCamelCase__ , 1.0 / math.sqrt(np.prod(image.shape ) ) )
return normalize(UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ , **UpperCamelCase__ )
def UpperCAmelCase__ ( self : str , UpperCamelCase__ : ImageInput , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[Dict[str, int]] = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : ChannelDimension = ChannelDimension.FIRST , **UpperCamelCase__ : Union[str, Any] , ):
'''simple docstring'''
lowercase_ = do_normalize if do_normalize is not None else self.do_normalize
lowercase_ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
lowercase_ = patch_size if patch_size is not None else self.patch_size
lowercase_ = max_patches if max_patches is not None else self.max_patches
lowercase_ = self.is_vqa
if kwargs.get("""data_format""" , UpperCamelCase__ ) is not None:
raise ValueError("""data_format is not an accepted input as the outputs are """ )
lowercase_ = make_list_of_images(UpperCamelCase__ )
if not valid_images(UpperCamelCase__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
lowercase_ = [convert_to_rgb(UpperCamelCase__ ) for image in images]
# All transformations expect numpy arrays.
lowercase_ = [to_numpy_array(UpperCamelCase__ ) for image in images]
if is_vqa:
if header_text is None:
raise ValueError("""A header text must be provided for VQA models.""" )
lowercase_ = kwargs.pop("""font_bytes""" , UpperCamelCase__ )
lowercase_ = kwargs.pop("""font_path""" , UpperCamelCase__ )
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
lowercase_ = [header_text] * len(UpperCamelCase__ )
lowercase_ = [
render_header(UpperCamelCase__ , header_text[i] , font_bytes=UpperCamelCase__ , font_path=UpperCamelCase__ )
for i, image in enumerate(UpperCamelCase__ )
]
if do_normalize:
lowercase_ = [self.normalize(image=UpperCamelCase__ ) for image in images]
# convert to torch tensor and permute
lowercase_ = [
self.extract_flattened_patches(image=UpperCamelCase__ , max_patches=UpperCamelCase__ , patch_size=UpperCamelCase__ )
for image in images
]
# create attention mask in numpy
lowercase_ = [(image.sum(axis=-1 ) != 0).astype(np.floataa ) for image in images]
lowercase_ = BatchFeature(
data={"""flattened_patches""": images, """attention_mask""": attention_masks} , tensor_type=UpperCamelCase__ )
return encoded_outputs
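# Hedged usage sketch for the processor above (its public name in transformers
# is Pix2StructImageProcessor). With 16x16 patches and 3 channels, each
# flattened patch carries 2 + 16 * 16 * 3 = 770 features (row id, col id, pixels):
#
#     from PIL import Image
#     processor = Pix2StructImageProcessor(max_patches=512)
#     out = processor(images=Image.new("RGB", (64, 64)), return_tensors="np")
#     out["flattened_patches"].shape  # (1, 512, 770)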
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
a = logging.get_logger(__name__)
class UpperCamelCase__(FeatureExtractionMixin):
    def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
        '''simple docstring'''
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value

        self.padding_side = kwargs.pop("padding_side", "right")
        self.return_attention_mask = kwargs.pop("return_attention_mask", True)

        super().__init__(**kwargs)
def UpperCAmelCase__ ( self : List[Any] , UpperCamelCase__ : Union[
BatchFeature,
List[BatchFeature],
Dict[str, BatchFeature],
Dict[str, List[BatchFeature]],
List[Dict[str, BatchFeature]],
] , UpperCamelCase__ : Union[bool, str, PaddingStrategy] = True , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , ):
'''simple docstring'''
if isinstance(_lowerCAmelCase , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
lowercase_ = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
# The model's main input name, usually `input_values`, has be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
"""You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"""
F''' to this method that includes {self.model_input_names[0]}, but you provided'''
F''' {list(processed_features.keys() )}''' )
lowercase_ = processed_features[self.model_input_names[0]]
lowercase_ = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(_lowerCAmelCase ) == 0:
if return_attention_mask:
lowercase_ = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
lowercase_ = required_input[0]
if isinstance(_lowerCAmelCase , (list, tuple) ):
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
lowercase_ = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(_lowerCAmelCase ):
lowercase_ = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(_lowerCAmelCase ):
lowercase_ = """tf"""
elif is_torch_tensor(_lowerCAmelCase ):
lowercase_ = """pt"""
elif isinstance(_lowerCAmelCase , (int, float, list, tuple, np.ndarray) ):
lowercase_ = """np"""
else:
raise ValueError(
F'''type of {first_element} unknown: {type(_lowerCAmelCase )}. '''
"""Should be one of a python, numpy, pytorch or tensorflow object.""" )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
lowercase_ = to_numpy(_lowerCAmelCase )
else:
lowercase_ = [to_numpy(_lowerCAmelCase ) for v in value]
# Convert padding_strategy in PaddingStrategy
lowercase_ = self._get_padding_strategies(padding=_lowerCAmelCase , max_length=_lowerCAmelCase )
lowercase_ = processed_features[self.model_input_names[0]]
lowercase_ = len(_lowerCAmelCase )
if not all(len(_lowerCAmelCase ) == batch_size for v in processed_features.values() ):
raise ValueError("""Some items in the output dictionary have a different batch size than others.""" )
lowercase_ = []
for i in range(_lowerCAmelCase ):
lowercase_ = {k: v[i] for k, v in processed_features.items()}
# truncation
lowercase_ = self._truncate(
_lowerCAmelCase , max_length=_lowerCAmelCase , pad_to_multiple_of=_lowerCAmelCase , truncation=_lowerCAmelCase , )
truncated_inputs.append(_lowerCAmelCase )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
lowercase_ = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
lowercase_ = PaddingStrategy.MAX_LENGTH
lowercase_ = {}
for i in range(_lowerCAmelCase ):
# padding
lowercase_ = self._pad(
truncated_inputs[i] , max_length=_lowerCAmelCase , padding_strategy=_lowerCAmelCase , pad_to_multiple_of=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , )
for key, value in outputs.items():
if key not in batch_outputs:
lowercase_ = []
if value.dtype is np.dtype(np.floataa ):
lowercase_ = value.astype(np.floataa )
batch_outputs[key].append(_lowerCAmelCase )
return BatchFeature(_lowerCAmelCase , tensor_type=_lowerCAmelCase )
def UpperCAmelCase__ ( self : int , UpperCamelCase__ : Union[Dict[str, np.ndarray], BatchFeature] , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[bool] = None , ):
'''simple docstring'''
lowercase_ = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
lowercase_ = len(_lowerCAmelCase )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
lowercase_ = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
lowercase_ = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(_lowerCAmelCase ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
lowercase_ = np.ones(len(_lowerCAmelCase ) , dtype=np.intaa )
if needs_to_be_padded:
lowercase_ = max_length - len(_lowerCAmelCase )
if self.padding_side == "right":
if return_attention_mask:
lowercase_ = np.pad(
processed_features["""attention_mask"""] , (0, difference) )
lowercase_ = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
lowercase_ = np.pad(
_lowerCAmelCase , _lowerCAmelCase , """constant""" , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
lowercase_ = np.pad(
processed_features["""attention_mask"""] , (difference, 0) )
lowercase_ = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
lowercase_ = np.pad(
_lowerCAmelCase , _lowerCAmelCase , """constant""" , constant_values=self.padding_value )
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
return processed_features
def UpperCAmelCase__ ( self : str , UpperCamelCase__ : Union[Dict[str, np.ndarray], BatchFeature] , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[bool] = None , ):
'''simple docstring'''
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError("""When setting ``truncation=True``, make sure that ``max_length`` is defined.""" )
lowercase_ = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
lowercase_ = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
lowercase_ = len(_lowerCAmelCase ) > max_length
if needs_to_be_truncated:
lowercase_ = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
lowercase_ = processed_features["""attention_mask"""][:max_length]
return processed_features
def UpperCAmelCase__ ( self : Union[str, Any] , UpperCamelCase__ : List[Any]=False , UpperCamelCase__ : Tuple=None ):
'''simple docstring'''
if padding is not False:
if padding is True:
lowercase_ = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
lowercase_ = PaddingStrategy(_lowerCAmelCase )
elif isinstance(_lowerCAmelCase , _lowerCAmelCase ):
lowercase_ = padding
else:
lowercase_ = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
F'''When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined''' )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
"""Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"""
""" as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.""" )
return padding_strategy
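# Hedged sketch of the padding path above for a concrete subclass (the public
# name of this base class in transformers is SequenceFeatureExtractor):
#
#     feat = SequenceFeatureExtractor(feature_size=1, sampling_rate=16_000, padding_value=0.0)
#     batch = BatchFeature({"input_values": [[1.0, 2.0, 3.0], [1.0]]})
#     padded = feat.pad(batch, padding="longest", return_tensors="np")
#     padded["input_values"].shape   # (2, 3); the short row is right-padded with 0.0
#     padded["attention_mask"][1]    # array([1, 0, 0], dtype=int32)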
import cva
import numpy as np
class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        '''simple docstring'''
        # k is the empirically determined Harris sensitivity constant (0.04-0.06).
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self):
        '''simple docstring'''
        return str(self.k)

    def detect(self, img_path: str):
        '''simple docstring'''
        img = cva.imread(img_path, 0)
        h, w = img.shape
        corner_list: list = []
        color_img = img.copy()
        color_img = cva.cvtColor(color_img, cva.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04
        offset = self.window_size // 2

        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()

                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)

        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cva.imwrite("detect.png", color_img)
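# The per-pixel response above is the classic Harris measure
# R = det(M) - k * trace(M) ** 2, where M is the structure tensor summed over
# the window. A hedged vectorized equivalent of the inner loops (assumes scipy
# is available; size must equal the odd window_size used above):
#
#     from scipy.ndimage import uniform_filter
#     wxx = uniform_filter(ixx, size=window_size) * window_size**2
#     wyy = uniform_filter(iyy, size=window_size) * window_size**2
#     wxy = uniform_filter(ixy, size=window_size) * window_size**2
#     r = (wxx * wyy - wxy**2) - k * (wxx + wyy) ** 2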
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 512
    elif "large" in model_name:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 768

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = SwinConfig(
        embed_dim=embed_dim,
        depths=depths,
        num_heads=num_heads,
        window_size=window_size,
        out_features=["stage1", "stage2", "stage3", "stage4"],
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )
    return config
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(("""backbone.patch_embed.projection.weight""", """backbone.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.projection.bias""", """backbone.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """backbone.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """backbone.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.norm1.weight''', F'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.norm1.bias''', F'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table''', F'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index''', F'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight''', F'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias''', F'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.norm2.weight''', F'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.norm2.bias''', F'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight''', F'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias''', F'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight''', F'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias''', F'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((F'''backbone.stages.{i}.downsample.reduction.weight''', F'''backbone.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((F'''backbone.stages.{i}.downsample.norm.weight''', F'''backbone.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((F'''backbone.stages.{i}.downsample.norm.bias''', F'''backbone.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((F'''backbone.norm{i}.weight''', F'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((F'''backbone.norm{i}.bias''', F'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""),
("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""),
("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""),
("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, 4, in_channel // 4)
    x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def reverse_correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, in_channel // 4, 4)
    x = x[:, :, [0, 2, 1, 3]].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(4, in_channel // 4)
    x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
    return x


def reverse_correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4, 4)
    x = x[:, [0, 2, 1, 3]].transpose(0, 1).reshape(in_channel)
    return x
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
        "upernet-swin-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth",
        "upernet-swin-small": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth",
        "upernet-swin-base": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth",
        "upernet-swin-large": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", file_name=model_name)[
        "state_dict"
    ]
    for name, param in state_dict.items():
        print(name, param.shape)

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config.backbone_config)

    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value)
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value)

    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print(logits.shape)
    print("First values of logits:", logits[0, 0, :3, :3])
    # assert values
    if model_name == "upernet-swin-tiny":
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        )
    elif model_name == "upernet-swin-small":
        expected_slice = torch.tensor(
            [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]]
        )
    elif model_name == "upernet-swin-base":
        expected_slice = torch.tensor(
            [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]]
        )
    elif model_name == "upernet-swin-large":
        expected_slice = torch.tensor(
            [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='upernet-swin-tiny',
type=str,
choices=[F'''upernet-swin-{size}''' for size in ['tiny', 'small', 'base', 'large']],
help='Name of the Swin + UperNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
a = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
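# Example invocation (hedged; the script filename is an assumption):
#
#     python convert_upernet_to_pytorch.py \
#         --model_name upernet-swin-tiny \
#         --pytorch_dump_folder_path ./upernet-swin-tiny \
#         --push_to_hub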
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('9.1.0'):
a = {
'linear': PIL.Image.Resampling.BILINEAR,
'bilinear': PIL.Image.Resampling.BILINEAR,
'bicubic': PIL.Image.Resampling.BICUBIC,
'lanczos': PIL.Image.Resampling.LANCZOS,
'nearest': PIL.Image.Resampling.NEAREST,
}
else:
a = {
'linear': PIL.Image.LINEAR,
'bilinear': PIL.Image.BILINEAR,
'bicubic': PIL.Image.BICUBIC,
'lanczos': PIL.Image.LANCZOS,
'nearest': PIL.Image.NEAREST,
}
def pt_to_pil(images):
    # Rescale from [-1, 1] to [0, 1], move to CPU/NHWC, then hand off to numpy_to_pil.
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
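# Runnable round-trip check for the helpers above: float images in [0, 1],
# shaped NHWC, come back as a list of PIL images.
if __name__ == "__main__":
    import numpy as np

    batch = np.random.rand(2, 8, 8, 3).astype("float32")
    pils = numpy_to_pil(batch)
    assert len(pils) == 2 and pils[0].size == (8, 8)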
def check_cycle(graph: dict) -> bool:
    # Keep track of all visited nodes.
    visited: set = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack.
    rec_stk: set = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph
    )


def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    # Mark the current node as visited and add it to the recursion stack.
    visited.add(vertex)
    rec_stk.add(vertex)
    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True
    # The node needs to be removed from the recursion stack before the function ends.
    rec_stk.remove(vertex)
    return False


if __name__ == "__main__":
    from doctest import testmod

    testmod()
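    # Example: a back edge 2 -> 0 makes the first graph cyclic; the second is a DAG.
    assert check_cycle({0: [1], 1: [2], 2: [0]}) is True
    assert check_cycle({0: [1], 1: [2], 2: []}) is False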
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase__(SchedulerCommonTest):
    scheduler_classes = (UnCLIPScheduler,)

    def get_scheduler_config(self, **kwargs):
        '''simple docstring'''
        config = {
            "num_train_timesteps": 1_000,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }

        config.update(**kwargs)
        return config
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=UpperCamelCase__ )
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=UpperCamelCase__ )
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=UpperCamelCase__ )
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=UpperCamelCase__ )
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=UpperCamelCase__ )
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=UpperCamelCase__ , prev_timestep=UpperCamelCase__ )
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ = self.scheduler_classes[0]
lowercase_ = self.get_scheduler_config(variance_type="""fixed_small_log""" )
lowercase_ = scheduler_class(**UpperCamelCase__ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000e-10 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_549_625 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_994_987 ) ) < 1e-5
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
lowercase_ = self.scheduler_classes[0]
lowercase_ = self.get_scheduler_config(variance_type="""learned_range""" )
lowercase_ = scheduler_class(**UpperCamelCase__ )
lowercase_ = 0.5
assert scheduler._get_variance(1 , predicted_variance=UpperCamelCase__ ) - -10.1_712_790 < 1e-5
assert scheduler._get_variance(487 , predicted_variance=UpperCamelCase__ ) - -5.7_998_052 < 1e-5
assert scheduler._get_variance(999 , predicted_variance=UpperCamelCase__ ) - -0.0_010_011 < 1e-5
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase_ = self.scheduler_classes[0]
lowercase_ = self.get_scheduler_config()
lowercase_ = scheduler_class(**UpperCamelCase__ )
lowercase_ = scheduler.timesteps
lowercase_ = self.dummy_model()
lowercase_ = self.dummy_sample_deter
lowercase_ = torch.manual_seed(0 )
for i, t in enumerate(UpperCamelCase__ ):
# 1. predict noise residual
lowercase_ = model(UpperCamelCase__ , UpperCamelCase__ )
# 2. predict previous mean of sample x_t-1
lowercase_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , generator=UpperCamelCase__ ).prev_sample
lowercase_ = pred_prev_sample
lowercase_ = torch.sum(torch.abs(UpperCamelCase__ ) )
lowercase_ = torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_sum.item() - 252.2_682_495 ) < 1e-2
assert abs(result_mean.item() - 0.3_284_743 ) < 1e-3
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
lowercase_ = self.scheduler_classes[0]
lowercase_ = self.get_scheduler_config()
lowercase_ = scheduler_class(**UpperCamelCase__ )
scheduler.set_timesteps(25 )
lowercase_ = scheduler.timesteps
lowercase_ = self.dummy_model()
lowercase_ = self.dummy_sample_deter
lowercase_ = torch.manual_seed(0 )
for i, t in enumerate(UpperCamelCase__ ):
# 1. predict noise residual
lowercase_ = model(UpperCamelCase__ , UpperCamelCase__ )
if i + 1 == timesteps.shape[0]:
lowercase_ = None
else:
lowercase_ = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
lowercase_ = scheduler.step(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , prev_timestep=UpperCamelCase__ , generator=UpperCamelCase__ ).prev_sample
lowercase_ = pred_prev_sample
lowercase_ = torch.sum(torch.abs(UpperCamelCase__ ) )
lowercase_ = torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_sum.item() - 258.2_044_983 ) < 1e-2
assert abs(result_mean.item() - 0.3_362_038 ) < 1e-3
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
pass
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
pass
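# Editorial sketch (an addition, not part of the test file): the denoising pattern the
# two full-loop tests exercise, written against the public diffusers API; `model` is a
# placeholder for any epsilon-predicting network.
#
#     scheduler = UnCLIPScheduler(num_train_timesteps=1_000)
#     sample = torch.randn(1, 3, 32, 32)
#     generator = torch.manual_seed(0)
#     for t in scheduler.timesteps:
#         residual = model(sample, t)  # predict the noise
#         sample = scheduler.step(residual, t, sample, generator=generator).prev_sample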
| 650
| 0
|
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing(
    search_prob,
    find_max = True,
    max_x = math.inf,
    min_x = -math.inf,
    max_y = math.inf,
    min_y = -math.inf,
    visualization = False,
    start_temperate = 100,
    rate_of_decrease = 0.01,
    threshold_temp = 1,
) -> Any:
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None
    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)
        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state
    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state
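# Editorial note (an addition): the acceptance rule above is the Metropolis criterion.
# A worsening move (change <= 0) is taken with probability e**(change / current_temp),
# so a change of -2.0 is accepted ~98% of the time at temperature 100 but only ~13.5%
# of the time at temperature 1: high temperatures explore, low temperatures exploit.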
if __name__ == "__main__":
    def test_f1(x, y):
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_min.score()}"
    )
    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_max = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_max.score()}"
    )

    def test_f2(x, y):
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        "The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_min.score()}"
    )
    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_max = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        "The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_max.score()}"
    )
| 712
|
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
a = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt')
a = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
a = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class UpperCamelCase__ :
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default='cifar10' , metadata={'help': 'Name of a dataset from the datasets package'} )
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default=__magic_name__ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default=__magic_name__ , metadata={'help': 'The column name of the images in the files. If not set, will try to use \'image\' or \'img\'.'} , )
__SCREAMING_SNAKE_CASE : Optional[str] = field(default=__magic_name__ , metadata={'help': 'A folder containing the training data.'} )
__SCREAMING_SNAKE_CASE : Optional[str] = field(default=__magic_name__ , metadata={'help': 'A folder containing the validation data.'} )
__SCREAMING_SNAKE_CASE : Optional[float] = field(
default=0.1_5 , metadata={'help': 'Percent to split off of train for validation.'} )
__SCREAMING_SNAKE_CASE : int = field(default=32 , metadata={'help': 'The size of the square patches to use for masking.'} )
__SCREAMING_SNAKE_CASE : float = field(
default=0.6 , metadata={'help': 'Percentage of patches to mask.'} , )
__SCREAMING_SNAKE_CASE : Optional[int] = field(
default=__magic_name__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
__SCREAMING_SNAKE_CASE : Optional[int] = field(
default=__magic_name__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
lowercase_ = {}
if self.train_dir is not None:
lowercase_ = self.train_dir
if self.validation_dir is not None:
lowercase_ = self.validation_dir
lowercase_ = data_files if data_files else None
@dataclass
class UpperCamelCase__ :
__SCREAMING_SNAKE_CASE : str = field(
default=__magic_name__ , metadata={
'help': (
'The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a '
'checkpoint identifier on the hub. '
'Don\'t set if you want to train a model from scratch.'
)
} , )
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default=__magic_name__ , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(__magic_name__ )} , )
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default=__magic_name__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default=__magic_name__ , metadata={
'help': (
'Override some existing default config settings when a model is trained from scratch. Example: '
'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'
)
} , )
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default=__magic_name__ , metadata={'help': 'Where do you want to store (cache) the pretrained models/datasets downloaded from the hub'} , )
__SCREAMING_SNAKE_CASE : str = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
__SCREAMING_SNAKE_CASE : str = field(default=__magic_name__ , metadata={'help': 'Name or path of preprocessor config.'} )
__SCREAMING_SNAKE_CASE : bool = field(
default=__magic_name__ , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
__SCREAMING_SNAKE_CASE : Optional[int] = field(
default=__magic_name__ , metadata={
'help': (
'The size (resolution) of each image. If not specified, will use `image_size` of the configuration.'
)
} , )
__SCREAMING_SNAKE_CASE : Optional[int] = field(
default=__magic_name__ , metadata={
'help': (
'The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration.'
)
} , )
__SCREAMING_SNAKE_CASE : Optional[int] = field(
default=__magic_name__ , metadata={'help': 'Stride to use for the encoder.'} , )
class UpperCamelCase__ :
def __init__( self : Dict , UpperCamelCase__ : List[Any]=192 , UpperCamelCase__ : Tuple=32 , UpperCamelCase__ : Dict=4 , UpperCamelCase__ : str=0.6 ):
'''simple docstring'''
lowercase_ = input_size
lowercase_ = mask_patch_size
lowercase_ = model_patch_size
lowercase_ = mask_ratio
if self.input_size % self.mask_patch_size != 0:
raise ValueError("""Input size must be divisible by mask patch size""" )
if self.mask_patch_size % self.model_patch_size != 0:
raise ValueError("""Mask patch size must be divisible by model patch size""" )
lowercase_ = self.input_size // self.mask_patch_size
lowercase_ = self.mask_patch_size // self.model_patch_size
lowercase_ = self.rand_size**2
lowercase_ = int(np.ceil(self.token_count * self.mask_ratio ) )
def __call__( self : int ):
'''simple docstring'''
lowercase_ = np.random.permutation(self.token_count )[: self.mask_count]
lowercase_ = np.zeros(self.token_count , dtype=UpperCamelCase__ )
lowercase_ = 1
lowercase_ = mask.reshape((self.rand_size, self.rand_size) )
lowercase_ = mask.repeat(self.scale , axis=0 ).repeat(self.scale , axis=1 )
return torch.tensor(mask.flatten() )
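# Editorial sketch (an addition, standalone): the masking logic above with explicit
# names so the shapes are easy to follow. With the defaults (192 input, 32 mask patch,
# 4 model patch, 0.6 ratio): a 6x6 coarse grid of 36 tokens, 22 of them masked, each
# expanded to an 8x8 block of model patches -> a 48x48 grid of 0/1 flags.
def _simmim_mask_sketch(input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6):
    rand_size = input_size // mask_patch_size
    scale = mask_patch_size // model_patch_size
    token_count = rand_size**2
    mask_count = int(np.ceil(token_count * mask_ratio))
    masked_idx = np.random.permutation(token_count)[:mask_count]
    mask = np.zeros(token_count, dtype=int)
    mask[masked_idx] = 1
    mask = mask.reshape(rand_size, rand_size)
    return mask.repeat(scale, axis=0).repeat(scale, axis=1)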
def UpperCAmelCase_ ( UpperCAmelCase__ ):
lowercase_ = torch.stack([example["""pixel_values"""] for example in examples] )
lowercase_ = torch.stack([example["""mask"""] for example in examples] )
return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def UpperCAmelCase_ ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowercase_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowercase_ , lowercase_ , lowercase_ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowercase_ , lowercase_ , lowercase_ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_mim""" , UpperCAmelCase__ , UpperCAmelCase__ )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowercase_ = training_args.get_process_log_level()
logger.setLevel(UpperCAmelCase__ )
transformers.utils.logging.set_verbosity(UpperCAmelCase__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
lowercase_ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowercase_ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Initialize our dataset.
lowercase_ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
lowercase_ = None if """validation""" in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , UpperCAmelCase__ ) and data_args.train_val_split > 0.0:
lowercase_ = ds["""train"""].train_test_split(data_args.train_val_split )
lowercase_ = split["""train"""]
lowercase_ = split["""test"""]
# Create config
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowercase_ = {
"""cache_dir""": model_args.cache_dir,
"""revision""": model_args.model_revision,
"""use_auth_token""": True if model_args.use_auth_token else None,
}
if model_args.config_name_or_path:
lowercase_ = AutoConfig.from_pretrained(model_args.config_name_or_path , **UpperCAmelCase__ )
elif model_args.model_name_or_path:
lowercase_ = AutoConfig.from_pretrained(model_args.model_name_or_path , **UpperCAmelCase__ )
else:
lowercase_ = CONFIG_MAPPING[model_args.model_type]()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.config_overrides is not None:
logger.info(F'''Overriding config: {model_args.config_overrides}''' )
config.update_from_string(model_args.config_overrides )
logger.info(F'''New config: {config}''' )
# make sure the decoder_type is "simmim" (only relevant for BEiT)
if hasattr(UpperCAmelCase__ , """decoder_type""" ):
lowercase_ = """simmim"""
# adapt config
lowercase_ = model_args.image_size if model_args.image_size is not None else config.image_size
lowercase_ = model_args.patch_size if model_args.patch_size is not None else config.patch_size
lowercase_ = (
model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
)
config.update(
{
"""image_size""": model_args.image_size,
"""patch_size""": model_args.patch_size,
"""encoder_stride""": model_args.encoder_stride,
} )
# create image processor
if model_args.image_processor_name:
lowercase_ = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **UpperCAmelCase__ )
elif model_args.model_name_or_path:
lowercase_ = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **UpperCAmelCase__ )
else:
lowercase_ = {
conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
}
lowercase_ = IMAGE_PROCESSOR_TYPES[model_args.model_type]()
# create model
if model_args.model_name_or_path:
lowercase_ = AutoModelForMaskedImageModeling.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=UpperCAmelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("""Training new model from scratch""" )
lowercase_ = AutoModelForMaskedImageModeling.from_config(UpperCAmelCase__ )
if training_args.do_train:
lowercase_ = ds["""train"""].column_names
else:
lowercase_ = ds["""validation"""].column_names
if data_args.image_column_name is not None:
lowercase_ = data_args.image_column_name
elif "image" in column_names:
lowercase_ = """image"""
elif "img" in column_names:
lowercase_ = """img"""
else:
lowercase_ = column_names[0]
# transformations as done in original SimMIM paper
# source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
lowercase_ = Compose(
[
Lambda(lambda UpperCAmelCase__ : img.convert("""RGB""" ) if img.mode != "RGB" else img ),
RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
# create mask generator
lowercase_ = MaskGenerator(
input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , )
def preprocess_images(UpperCAmelCase__ ):
lowercase_ = [transforms(UpperCAmelCase__ ) for image in examples[image_column_name]]
lowercase_ = [mask_generator() for i in range(len(examples[image_column_name] ) )]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError("""--do_train requires a train dataset""" )
if data_args.max_train_samples is not None:
lowercase_ = ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(UpperCAmelCase__ )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError("""--do_eval requires a validation dataset""" )
if data_args.max_eval_samples is not None:
lowercase_ = (
ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(UpperCAmelCase__ )
# Initialize our trainer
lowercase_ = Trainer(
model=UpperCAmelCase__ , args=UpperCAmelCase__ , train_dataset=ds["""train"""] if training_args.do_train else None , eval_dataset=ds["""validation"""] if training_args.do_eval else None , tokenizer=UpperCAmelCase__ , data_collator=UpperCAmelCase__ , )
# Training
if training_args.do_train:
lowercase_ = None
if training_args.resume_from_checkpoint is not None:
lowercase_ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowercase_ = last_checkpoint
lowercase_ = trainer.train(resume_from_checkpoint=UpperCAmelCase__ )
trainer.save_model()
trainer.log_metrics("""train""" , train_result.metrics )
trainer.save_metrics("""train""" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
lowercase_ = trainer.evaluate()
trainer.log_metrics("""eval""" , UpperCAmelCase__ )
trainer.save_metrics("""eval""" , UpperCAmelCase__ )
# Write model card and (optionally) push to hub
lowercase_ = {
"""finetuned_from""": model_args.model_name_or_path,
"""tasks""": """masked-image-modeling""",
"""dataset""": data_args.dataset_name,
"""tags""": ["""masked-image-modeling"""],
}
if training_args.push_to_hub:
trainer.push_to_hub(**UpperCAmelCase__ )
else:
trainer.create_model_card(**UpperCAmelCase__ )
if __name__ == "__main__":
main()
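# Editorial usage sketch (an addition; flags mirror the dataclass fields defined above,
# values are illustrative only):
#
#     python run_mim.py \
#         --dataset_name cifar10 \
#         --output_dir ./simmim-outputs \
#         --do_train --do_eval \
#         --mask_patch_size 32 --mask_ratio 0.6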
| 650
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a = logging.get_logger(__name__)
a = {
"facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class UpperCamelCase__ ( __magic_name__ ):
__SCREAMING_SNAKE_CASE : str = 'vit_mae'
def __init__( self : Optional[int] , UpperCamelCase__ : Optional[Any]=768 , UpperCamelCase__ : Optional[int]=12 , UpperCamelCase__ : Any=12 , UpperCamelCase__ : Optional[Any]=3_072 , UpperCamelCase__ : Tuple="gelu" , UpperCamelCase__ : Any=0.0 , UpperCamelCase__ : Tuple=0.0 , UpperCamelCase__ : str=0.02 , UpperCamelCase__ : Optional[Any]=1e-12 , UpperCamelCase__ : Dict=224 , UpperCamelCase__ : List[str]=16 , UpperCamelCase__ : Optional[Any]=3 , UpperCamelCase__ : Any=True , UpperCamelCase__ : Tuple=16 , UpperCamelCase__ : Any=512 , UpperCamelCase__ : Optional[Any]=8 , UpperCamelCase__ : Any=2_048 , UpperCamelCase__ : str=0.75 , UpperCamelCase__ : Union[str, Any]=False , **UpperCamelCase__ : Optional[int] , ):
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
lowercase_ = hidden_size
lowercase_ = num_hidden_layers
lowercase_ = num_attention_heads
lowercase_ = intermediate_size
lowercase_ = hidden_act
lowercase_ = hidden_dropout_prob
lowercase_ = attention_probs_dropout_prob
lowercase_ = initializer_range
lowercase_ = layer_norm_eps
lowercase_ = image_size
lowercase_ = patch_size
lowercase_ = num_channels
lowercase_ = qkv_bias
lowercase_ = decoder_num_attention_heads
lowercase_ = decoder_hidden_size
lowercase_ = decoder_num_hidden_layers
lowercase_ = decoder_intermediate_size
lowercase_ = mask_ratio
lowercase_ = norm_pix_loss
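# Editorial sketch (an addition): the defaults above describe facebook/vit-mae-base -- a
# ViT-B/16 encoder paired with a lighter 8-layer, 512-dim decoder, masking 75% of the
# patches. Hedged usage, assuming the class is exported as ViTMAEConfig:
#
#     config = ViTMAEConfig(mask_ratio=0.75, norm_pix_loss=False)
#     # 224 / 16 = 14 patches per side -> 196 patch tokens, ~147 of them masked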
| 713
|
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
a = logging.get_logger(__name__)
class UpperCamelCase__ ( __magic_name__ ):
__SCREAMING_SNAKE_CASE : List[Any] = ['pixel_values']
def __init__( self : List[str] , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[int, float] = 1 / 255 , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , UpperCamelCase__ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **UpperCamelCase__ : Dict , ):
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
lowercase_ = size if size is not None else {"""shortest_edge""": 224}
lowercase_ = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ )
lowercase_ = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
lowercase_ = get_size_dict(UpperCamelCase__ , param_name="""crop_size""" )
lowercase_ = do_resize
lowercase_ = size
lowercase_ = resample
lowercase_ = do_center_crop
lowercase_ = crop_size
lowercase_ = do_rescale
lowercase_ = rescale_factor
lowercase_ = do_normalize
lowercase_ = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
lowercase_ = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def UpperCAmelCase__ ( self : int , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Optional[int] , ):
'''simple docstring'''
lowercase_ = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
lowercase_ = int((256 / 224) * size["""shortest_edge"""] )
lowercase_ = get_resize_output_image_size(UpperCamelCase__ , size=UpperCamelCase__ , default_to_square=UpperCamelCase__ )
lowercase_ = {"""height""": output_size[0], """width""": output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
F'''Size dict must have keys \'height\' and \'width\' or \'shortest_edge\'. Got {size_dict.keys()}''' )
return resize(
UpperCamelCase__ , size=(size_dict["""height"""], size_dict["""width"""]) , resample=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def UpperCAmelCase__ ( self : str , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : str , ):
'''simple docstring'''
lowercase_ = get_size_dict(UpperCamelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(F'''Size dict must have keys \'height\' and \'width\'. Got {size.keys()}''' )
return center_crop(UpperCamelCase__ , size=(size["""height"""], size["""width"""]) , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def UpperCAmelCase__ ( self : Optional[int] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[int, float] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : str , ):
'''simple docstring'''
return rescale(UpperCamelCase__ , scale=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def UpperCAmelCase__ ( self : List[str] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Optional[int] , ):
'''simple docstring'''
return normalize(UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def UpperCAmelCase__ ( self : Dict , UpperCamelCase__ : ImageInput , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[Dict[str, int]] = None , UpperCamelCase__ : PILImageResampling = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[Dict[str, int]] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[float] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[Union[float, Iterable[float]]] = None , UpperCamelCase__ : Optional[Union[float, Iterable[float]]] = None , UpperCamelCase__ : Optional[TensorType] = None , UpperCamelCase__ : ChannelDimension = ChannelDimension.FIRST , **UpperCamelCase__ : Optional[int] , ):
'''simple docstring'''
lowercase_ = do_resize if do_resize is not None else self.do_resize
lowercase_ = resample if resample is not None else self.resample
lowercase_ = do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase_ = do_rescale if do_rescale is not None else self.do_rescale
lowercase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase_ = do_normalize if do_normalize is not None else self.do_normalize
lowercase_ = image_mean if image_mean is not None else self.image_mean
lowercase_ = image_std if image_std is not None else self.image_std
lowercase_ = size if size is not None else self.size
lowercase_ = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ )
lowercase_ = crop_size if crop_size is not None else self.crop_size
lowercase_ = get_size_dict(UpperCamelCase__ , param_name="""crop_size""" )
lowercase_ = make_list_of_images(UpperCamelCase__ )
if not valid_images(UpperCamelCase__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
lowercase_ = [to_numpy_array(UpperCamelCase__ ) for image in images]
if do_resize:
lowercase_ = [self.resize(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) for image in images]
if do_center_crop:
lowercase_ = [self.center_crop(UpperCamelCase__ , UpperCamelCase__ ) for image in images]
if do_rescale:
lowercase_ = [self.rescale(UpperCamelCase__ , UpperCamelCase__ ) for image in images]
if do_normalize:
lowercase_ = [self.normalize(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) for image in images]
lowercase_ = [to_channel_dimension_format(UpperCamelCase__ , UpperCamelCase__ ) for image in images]
lowercase_ = {"""pixel_values""": images}
return BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ )
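# Editorial usage sketch (an addition; `processor` stands for an instance of the class
# above, however the module exports it):
#
#     from PIL import Image
#     batch = processor(images=Image.open("cat.png"), return_tensors="pt")
#     batch["pixel_values"].shape  # torch.Size([1, 3, 224, 224]) after resize + center crop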
| 650
| 0
|
'''simple docstring'''
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ ):
lowercase_ = F'''{sampling_rate}'''
lowercase_ = """1"""
lowercase_ = """f32le"""
lowercase_ = [
"""ffmpeg""",
"""-i""",
"""pipe:0""",
"""-ac""",
ac,
"""-ar""",
ar,
"""-f""",
format_for_conversion,
"""-hide_banner""",
"""-loglevel""",
"""quiet""",
"""pipe:1""",
]
try:
with subprocess.Popen(a__ , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
lowercase_ = ffmpeg_process.communicate(a__ )
except FileNotFoundError as error:
raise ValueError("""ffmpeg was not found but is required to load audio files from filename""" ) from error
lowercase_ = output_stream[0]
lowercase_ = np.frombuffer(a__ , np.floataa )
if audio.shape[0] == 0:
raise ValueError("""Malformed soundfile""" )
return audio
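# Editorial usage sketch (an addition; in transformers this helper is known as
# `ffmpeg_read` -- the name is assumed here since the def above is obfuscated):
#
#     with open("speech.mp3", "rb") as f:
#         waveform = ffmpeg_read(f.read(), 16_000)
#     waveform.dtype, waveform.ndim  # (dtype('float32'), 1) -- mono float32 at 16 kHz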
def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = "f32le" , ):
lowercase_ = F'''{sampling_rate}'''
lowercase_ = """1"""
if format_for_conversion == "s16le":
lowercase_ = 2
elif format_for_conversion == "f32le":
lowercase_ = 4
else:
raise ValueError(F'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
lowercase_ = platform.system()
if system == "Linux":
lowercase_ = """alsa"""
lowercase_ = """default"""
elif system == "Darwin":
lowercase_ = """avfoundation"""
lowercase_ = """:0"""
elif system == "Windows":
lowercase_ = """dshow"""
lowercase_ = """default"""
lowercase_ = [
"""ffmpeg""",
"""-f""",
format_,
"""-i""",
input_,
"""-ac""",
ac,
"""-ar""",
ar,
"""-f""",
format_for_conversion,
"""-fflags""",
"""nobuffer""",
"""-hide_banner""",
"""-loglevel""",
"""quiet""",
"""pipe:1""",
]
lowercase_ = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
lowercase_ = _ffmpeg_stream(a__ , a__ )
for item in iterator:
yield item
def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__ = "f32le" , ):
if stream_chunk_s is not None:
lowercase_ = stream_chunk_s
else:
lowercase_ = chunk_length_s
lowercase_ = ffmpeg_microphone(a__ , a__ , format_for_conversion=a__ )
if format_for_conversion == "s16le":
lowercase_ = np.intaa
lowercase_ = 2
elif format_for_conversion == "f32le":
lowercase_ = np.floataa
lowercase_ = 4
else:
raise ValueError(F'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
if stride_length_s is None:
lowercase_ = chunk_length_s / 6
lowercase_ = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(a__ , (int, float) ):
lowercase_ = [stride_length_s, stride_length_s]
lowercase_ = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
lowercase_ = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
lowercase_ = datetime.datetime.now()
lowercase_ = datetime.timedelta(seconds=a__ )
for item in chunk_bytes_iter(a__ , a__ , stride=(stride_left, stride_right) , stream=a__ ):
# Put everything back in numpy scale
lowercase_ = np.frombuffer(item["""raw"""] , dtype=a__ )
lowercase_ = (
item["""stride"""][0] // size_of_sample,
item["""stride"""][1] // size_of_sample,
)
lowercase_ = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 1_0 * delta:
# We're late !! SKIP
continue
yield item
def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = False ):
lowercase_ = B""""""
lowercase_ , lowercase_ = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
F'''Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}''' )
lowercase_ = 0
for raw in iterator:
acc += raw
if stream and len(a__ ) < chunk_len:
lowercase_ = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(a__ ) >= chunk_len:
# We are flushing the accumulator
lowercase_ = (_stride_left, stride_right)
lowercase_ = {"""raw""": acc[:chunk_len], """stride""": stride}
if stream:
lowercase_ = False
yield item
lowercase_ = stride_left
lowercase_ = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(a__ ) > stride_left:
lowercase_ = {"""raw""": acc, """stride""": (_stride_left, 0)}
if stream:
lowercase_ = False
yield item
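# Editorial sketch (an addition): a trace of the intended striding above. With
# chunk_len=6 and stride=(2, 2), consecutive windows overlap by two bytes on each side
# so the consumer can discard the unreliable edges:
#
#     list(chunk_bytes_iter(iter([b"01234", b"56789"]), 6, stride=(2, 2)))
#     # [{'raw': b'012345', 'stride': (0, 2)},
#     #  {'raw': b'234567', 'stride': (2, 2)},
#     #  {'raw': b'456789', 'stride': (2, 2)},
#     #  {'raw': b'6789',   'stride': (2, 0)}]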
def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ ):
lowercase_ = 2**2_4 # 16Mo
try:
with subprocess.Popen(a__ , stdout=subprocess.PIPE , bufsize=a__ ) as ffmpeg_process:
while True:
lowercase_ = ffmpeg_process.stdout.read(a__ )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError("""ffmpeg was not found but is required to stream audio files from filename""" ) from error
| 714
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
a = {
'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'],
'processing_trocr': ['TrOCRProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrOCRForCausalLM',
'TrOCRPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 650
| 0
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
a = logging.get_logger(__name__)
def UpperCAmelCase_ ( UpperCAmelCase__ ):
lowercase_ = DPTConfig()
if "large" in checkpoint_url:
lowercase_ = 1_0_2_4
lowercase_ = 4_0_9_6
lowercase_ = 2_4
lowercase_ = 1_6
lowercase_ = [5, 1_1, 1_7, 2_3]
lowercase_ = [2_5_6, 5_1_2, 1_0_2_4, 1_0_2_4]
lowercase_ = (1, 3_8_4, 3_8_4)
if "ade" in checkpoint_url:
lowercase_ = True
lowercase_ = 1_5_0
lowercase_ = """huggingface/label-files"""
lowercase_ = """ade20k-id2label.json"""
lowercase_ = json.load(open(cached_download(hf_hub_url(UpperCamelCase__ , UpperCamelCase__ , repo_type="""dataset""" ) ) , """r""" ) )
lowercase_ = {int(UpperCamelCase__ ): v for k, v in idalabel.items()}
lowercase_ = idalabel
lowercase_ = {v: k for k, v in idalabel.items()}
lowercase_ = [1, 1_5_0, 4_8_0, 4_8_0]
return config, expected_shape
def UpperCAmelCase_ ( UpperCAmelCase__ ):
lowercase_ = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""]
for k in ignore_keys:
state_dict.pop(UpperCamelCase__ , UpperCamelCase__ )
def UpperCAmelCase_ ( UpperCAmelCase__ ):
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
lowercase_ = name.replace("""pretrained.model""" , """dpt.encoder""" )
if "pretrained.model" in name:
lowercase_ = name.replace("""pretrained.model""" , """dpt.embeddings""" )
if "patch_embed" in name:
lowercase_ = name.replace("""patch_embed""" , """patch_embeddings""" )
if "pos_embed" in name:
lowercase_ = name.replace("""pos_embed""" , """position_embeddings""" )
if "attn.proj" in name:
lowercase_ = name.replace("""attn.proj""" , """attention.output.dense""" )
if "proj" in name and "project" not in name:
lowercase_ = name.replace("""proj""" , """projection""" )
if "blocks" in name:
lowercase_ = name.replace("""blocks""" , """layer""" )
if "mlp.fc1" in name:
lowercase_ = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
lowercase_ = name.replace("""mlp.fc2""" , """output.dense""" )
if "norm1" in name:
lowercase_ = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
lowercase_ = name.replace("""norm2""" , """layernorm_after""" )
if "scratch.output_conv" in name:
lowercase_ = name.replace("""scratch.output_conv""" , """head""" )
if "scratch" in name:
lowercase_ = name.replace("""scratch""" , """neck""" )
if "layer1_rn" in name:
lowercase_ = name.replace("""layer1_rn""" , """convs.0""" )
if "layer2_rn" in name:
lowercase_ = name.replace("""layer2_rn""" , """convs.1""" )
if "layer3_rn" in name:
lowercase_ = name.replace("""layer3_rn""" , """convs.2""" )
if "layer4_rn" in name:
lowercase_ = name.replace("""layer4_rn""" , """convs.3""" )
if "refinenet" in name:
lowercase_ = int(name[len("""neck.refinenet""" ) : len("""neck.refinenet""" ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
lowercase_ = name.replace(F'''refinenet{layer_idx}''' , F'''fusion_stage.layers.{abs(layer_idx-4 )}''' )
if "out_conv" in name:
lowercase_ = name.replace("""out_conv""" , """projection""" )
if "resConfUnit1" in name:
lowercase_ = name.replace("""resConfUnit1""" , """residual_layer1""" )
if "resConfUnit2" in name:
lowercase_ = name.replace("""resConfUnit2""" , """residual_layer2""" )
if "conv1" in name:
lowercase_ = name.replace("""conv1""" , """convolution1""" )
if "conv2" in name:
lowercase_ = name.replace("""conv2""" , """convolution2""" )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
lowercase_ = name.replace("""pretrained.act_postprocess1.0.project.0""" , """neck.reassemble_stage.readout_projects.0.0""" )
if "pretrained.act_postprocess2.0.project.0" in name:
lowercase_ = name.replace("""pretrained.act_postprocess2.0.project.0""" , """neck.reassemble_stage.readout_projects.1.0""" )
if "pretrained.act_postprocess3.0.project.0" in name:
lowercase_ = name.replace("""pretrained.act_postprocess3.0.project.0""" , """neck.reassemble_stage.readout_projects.2.0""" )
if "pretrained.act_postprocess4.0.project.0" in name:
lowercase_ = name.replace("""pretrained.act_postprocess4.0.project.0""" , """neck.reassemble_stage.readout_projects.3.0""" )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
lowercase_ = name.replace("""pretrained.act_postprocess1.3""" , """neck.reassemble_stage.layers.0.projection""" )
if "pretrained.act_postprocess1.4" in name:
lowercase_ = name.replace("""pretrained.act_postprocess1.4""" , """neck.reassemble_stage.layers.0.resize""" )
if "pretrained.act_postprocess2.3" in name:
lowercase_ = name.replace("""pretrained.act_postprocess2.3""" , """neck.reassemble_stage.layers.1.projection""" )
if "pretrained.act_postprocess2.4" in name:
lowercase_ = name.replace("""pretrained.act_postprocess2.4""" , """neck.reassemble_stage.layers.1.resize""" )
if "pretrained.act_postprocess3.3" in name:
lowercase_ = name.replace("""pretrained.act_postprocess3.3""" , """neck.reassemble_stage.layers.2.projection""" )
if "pretrained.act_postprocess4.3" in name:
lowercase_ = name.replace("""pretrained.act_postprocess4.3""" , """neck.reassemble_stage.layers.3.projection""" )
if "pretrained.act_postprocess4.4" in name:
lowercase_ = name.replace("""pretrained.act_postprocess4.4""" , """neck.reassemble_stage.layers.3.resize""" )
if "pretrained" in name:
lowercase_ = name.replace("""pretrained""" , """dpt""" )
if "bn" in name:
lowercase_ = name.replace("""bn""" , """batch_norm""" )
if "head" in name:
lowercase_ = name.replace("""head""" , """head.head""" )
if "encoder.norm" in name:
lowercase_ = name.replace("""encoder.norm""" , """layernorm""" )
if "auxlayer" in name:
lowercase_ = name.replace("""auxlayer""" , """auxiliary_head.head""" )
return name
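# Editorial note (an addition) on the "tricky" mapping above: DPT numbers its refinenets
# 1..4 while HF indexes fusion_stage.layers 0..3 in the opposite direction, hence
# abs(layer_idx - 4): refinenet4 -> layers.0, refinenet3 -> layers.1,
# refinenet2 -> layers.2, refinenet1 -> layers.3.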
def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ ):
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowercase_ = state_dict.pop(F'''dpt.encoder.layer.{i}.attn.qkv.weight''' )
lowercase_ = state_dict.pop(F'''dpt.encoder.layer.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
lowercase_ = in_proj_weight[: config.hidden_size, :]
lowercase_ = in_proj_bias[: config.hidden_size]
lowercase_ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowercase_ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowercase_ = in_proj_weight[
-config.hidden_size :, :
]
lowercase_ = in_proj_bias[-config.hidden_size :]
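# Editorial note (an addition): timm stores attention as one fused qkv projection of
# shape (3 * hidden_size, hidden_size); the three consecutive slices above become the
# separate query / key / value weights and biases that the HF attention module expects.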
def UpperCAmelCase_ ( ):
lowercase_ = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowercase_ = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw )
return im
@torch.no_grad()
def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
lowercase_ , lowercase_ = get_dpt_config(UpperCamelCase__ )
# load original state_dict from URL
lowercase_ = torch.hub.load_state_dict_from_url(UpperCamelCase__ , map_location="""cpu""" )
# remove certain keys
remove_ignore_keys_(UpperCamelCase__ )
# rename keys
for key in state_dict.copy().keys():
lowercase_ = state_dict.pop(UpperCamelCase__ )
lowercase_ = val
# read in qkv matrices
read_in_q_k_v(UpperCamelCase__ , UpperCamelCase__ )
# load HuggingFace model
lowercase_ = DPTForSemanticSegmentation(UpperCamelCase__ ) if """ade""" in checkpoint_url else DPTForDepthEstimation(UpperCamelCase__ )
model.load_state_dict(UpperCamelCase__ )
model.eval()
# Check outputs on an image
lowercase_ = 4_8_0 if """ade""" in checkpoint_url else 3_8_4
lowercase_ = DPTImageProcessor(size=UpperCamelCase__ )
lowercase_ = prepare_img()
lowercase_ = image_processor(UpperCamelCase__ , return_tensors="""pt""" )
# forward pass
lowercase_ = model(**UpperCamelCase__ ).logits if """ade""" in checkpoint_url else model(**UpperCamelCase__ ).predicted_depth
# Assert logits
lowercase_ = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]] )
if "ade" in checkpoint_url:
lowercase_ = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]] )
assert outputs.shape == torch.Size(UpperCamelCase__ )
assert (
torch.allclose(outputs[0, 0, :3, :3] , UpperCamelCase__ , atol=1e-4 )
if "ade" in checkpoint_url
else torch.allclose(outputs[0, :3, :3] , UpperCamelCase__ )
)
Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(UpperCamelCase__ )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(UpperCamelCase__ )
if push_to_hub:
print("""Pushing model to hub...""" )
model.push_to_hub(
repo_path_or_name=Path(UpperCamelCase__ , UpperCamelCase__ ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=UpperCamelCase__ , )
image_processor.push_to_hub(
repo_path_or_name=Path(UpperCamelCase__ , UpperCamelCase__ ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=UpperCamelCase__ , )
if __name__ == "__main__":
a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
a = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 715
|
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
a = logging.get_logger(__name__)
a = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
a = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
for attribute in key.split(""".""" ):
lowercase_ = getattr(UpperCAmelCase__ , UpperCAmelCase__ )
if weight_type is not None:
lowercase_ = getattr(UpperCAmelCase__ , UpperCAmelCase__ ).shape
else:
lowercase_ = hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
lowercase_ = value
elif weight_type == "weight_g":
lowercase_ = value
elif weight_type == "weight_v":
lowercase_ = value
elif weight_type == "bias":
lowercase_ = value
else:
lowercase_ = value
logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ ):
lowercase_ = []
lowercase_ = fairseq_model.state_dict()
lowercase_ = hf_model.feature_extractor
# if encoder has different dim to decoder -> use proj_weight
lowercase_ = None
for name, value in fairseq_dict.items():
lowercase_ = False
if "conv_layers" in name:
load_conv_layer(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , hf_model.config.feat_extract_norm == """group""" , )
lowercase_ = True
elif name.split(""".""" )[0] == "proj":
lowercase_ = fairseq_model.proj
lowercase_ = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
lowercase_ = True
if "*" in mapped_key:
lowercase_ = name.split(UpperCAmelCase__ )[0].split(""".""" )[-2]
lowercase_ = mapped_key.replace("""*""" , UpperCAmelCase__ )
if "weight_g" in name:
lowercase_ = """weight_g"""
elif "weight_v" in name:
lowercase_ = """weight_v"""
elif "bias" in name:
lowercase_ = """bias"""
elif "weight" in name:
lowercase_ = """weight"""
else:
lowercase_ = None
set_recursively(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
continue
if not is_used:
unused_weights.append(UpperCAmelCase__ )
logger.warning(F'''Unused weights: {unused_weights}''' )
return proj_weight
def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
lowercase_ = full_name.split("""conv_layers.""" )[-1]
lowercase_ = name.split(""".""" )
lowercase_ = int(items[0] )
lowercase_ = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
lowercase_ = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
lowercase_ = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
lowercase_ = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
lowercase_ = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(UpperCAmelCase__ )
def UpperCAmelCase_ ( UpperCAmelCase__ ):
lowercase_ , lowercase_ = emb.weight.shape
lowercase_ = nn.Linear(UpperCAmelCase__ , UpperCAmelCase__ , bias=UpperCAmelCase__ )
lowercase_ = emb.weight.data
return lin_layer
def UpperCAmelCase_ ( UpperCAmelCase__ ):
with open(UpperCAmelCase__ , """r""" , encoding="""utf-8""" ) as f:
lowercase_ = f.readlines()
lowercase_ = [line.split(""" """ )[0] for line in lines]
lowercase_ = len(UpperCAmelCase__ )
lowercase_ = {
"""<s>""": 0,
"""<pad>""": 1,
"""</s>""": 2,
"""<unk>""": 3,
}
vocab_dict.update(dict(zip(UpperCAmelCase__ , range(4 , num_words + 4 ) ) ) )
return vocab_dict
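# Editorial sketch (an addition): for a fairseq dict.txt whose lines look like
# "hello 42", the function above returns
#     {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "hello": 4, ...}
# i.e. the four specials first, then the words in file order starting at index 4.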
@torch.no_grad()
def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , ):
lowercase_ = WavaVecaConfig.from_pretrained(UpperCAmelCase__ )
lowercase_ = SpeechaTextaConfig.from_pretrained(
UpperCAmelCase__ , vocab_size=UpperCAmelCase__ , decoder_layers=UpperCAmelCase__ , do_stable_layer_norm=UpperCAmelCase__ )
lowercase_ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , )
lowercase_ , lowercase_ , lowercase_ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
lowercase_ = model[0].eval()
# set weights for wav2vec2 encoder
lowercase_ = WavaVecaModel(UpperCAmelCase__ )
lowercase_ = recursively_load_weights_wavaveca(model.encoder , UpperCAmelCase__ )
lowercase_ = SpeechaTextaForCausalLM(UpperCAmelCase__ )
lowercase_ , lowercase_ = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=UpperCAmelCase__ )
# set output linear layer
unexpected_keys.remove("""embed_out""" )
lowercase_ = nn.Parameter(model.decoder.embed_out.detach() )
# layer norm is init to identity matrix so leaving it is fine
logger.warning(F'''The following keys are missing when loading the decoder weights: {missing_keys}''' )
logger.warning(F'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''' )
lowercase_ = SpeechEncoderDecoderModel(encoder=UpperCAmelCase__ , decoder=UpperCAmelCase__ )
lowercase_ = False
# add projection layer
lowercase_ = nn.Parameter(projection_layer.weight )
lowercase_ = nn.Parameter(projection_layer.bias )
lowercase_ = create_vocab_dict(UpperCAmelCase__ )
with open(os.path.join(UpperCAmelCase__ , """vocab.json""" ) , """w""" ) as fp:
json.dump(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase_ = SpeechaTextaTokenizer(os.path.join(UpperCAmelCase__ , """vocab.json""" ) )
tokenizer.save_pretrained(UpperCAmelCase__ )
lowercase_ = hf_wavavec.config.to_dict()
lowercase_ = tokenizer.pad_token_id
lowercase_ = tokenizer.bos_token_id
lowercase_ = tokenizer.eos_token_id
lowercase_ = """speech_to_text_2"""
lowercase_ = """wav2vec2"""
lowercase_ = SpeechEncoderDecoderConfig.from_dict(UpperCAmelCase__ )
hf_wavavec.save_pretrained(UpperCAmelCase__ )
feature_extractor.save_pretrained(UpperCAmelCase__ )
if __name__ == "__main__":
a = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument(
'--encoder_config_path',
default='facebook/wav2vec2-large-lv60',
type=str,
help='Path to hf encoder wav2vec2 checkpoint config',
)
parser.add_argument(
'--decoder_config_path',
default='facebook/s2t-small-mustc-en-fr-st',
type=str,
help='Path to hf decoder s2t checkpoint config',
)
parser.add_argument('--vocab_size', default=1_0_2_2_4, type=int, help='Vocab size of decoder')
parser.add_argument('--num_decoder_layers', default=7, type=int, help='Number of decoder layers')
a = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
| 650
| 0
|
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
a = collections.namedtuple('_Datasets', ['train', 'validation', 'test'])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
a = '''https://storage.googleapis.com/cvdf-datasets/mnist/'''
def UpperCAmelCase_ ( UpperCAmelCase__ ):
lowercase_ = numpy.dtype(numpy.uintaa ).newbyteorder(""">""" )
return numpy.frombuffer(bytestream.read(4 ) , dtype=_UpperCAmelCase )[0]
@deprecated(None , """Please use tf.data to implement this functionality.""" )
def _extract_images ( f ):
    print("""Extracting""" , f.name )
    with gzip.GzipFile(fileobj=f ) as bytestream:
        magic = _read32(bytestream )
        if magic != 2_0_5_1:
            raise ValueError(
                """Invalid magic number %d in MNIST image file: %s""" % (magic, f.name) )
        num_images = _read32(bytestream )
        rows = _read32(bytestream )
        cols = _read32(bytestream )
        buf = bytestream.read(rows * cols * num_images )
        data = numpy.frombuffer(buf , dtype=numpy.uint8 )
        data = data.reshape(num_images , rows , cols , 1 )
        return data
@deprecated(None , """Please use tf.one_hot on tensors.""" )
def _dense_to_one_hot ( labels_dense , num_classes ):
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels ) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes) )
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
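# Worked example for the helper above (values follow directly from the code):
#
#   _dense_to_one_hot(numpy.array([0, 2]) , 3 )
#   # -> array([[1., 0., 0.],
#   #           [0., 0., 1.]])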
@deprecated(None , """Please use tf.data to implement this functionality.""" )
def _extract_labels ( f , one_hot=False , num_classes=1_0 ):
    print("""Extracting""" , f.name )
    with gzip.GzipFile(fileobj=f ) as bytestream:
        magic = _read32(bytestream )
        if magic != 2_0_4_9:
            raise ValueError(
                """Invalid magic number %d in MNIST label file: %s""" % (magic, f.name) )
        num_items = _read32(bytestream )
        buf = bytestream.read(num_items )
        labels = numpy.frombuffer(buf , dtype=numpy.uint8 )
        if one_hot:
            return _dense_to_one_hot(labels , num_classes )
        return labels
class _DataSet :
    @deprecated(
        None , """Please use alternatives such as official/mnist/_DataSet.py"""
        """ from tensorflow/models.""" , )
    def __init__( self , images , labels , fake_data=False , one_hot=False , dtype=dtypes.float32 , reshape=True , seed=None , ):
        '''simple docstring'''
        seeda , seedb = random_seed.get_seed(seed )
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seeda if seed is None else seedb )
        dtype = dtypes.as_dtype(dtype ).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("""Invalid image dtype %r, expected uint8 or float32""" % dtype )
        if fake_data:
            self._num_examples = 10_000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), F'''images.shape: {images.shape} labels.shape: {labels.shape}'''
            self._num_examples = images.shape[0]
            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(
                    images.shape[0] , images.shape[1] * images.shape[2] )
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32 )
                images = numpy.multiply(images , 1.0 / 255.0 )
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0
    @property
    def images( self ):
        '''simple docstring'''
        return self._images
    @property
    def labels( self ):
        '''simple docstring'''
        return self._labels
    @property
    def num_examples( self ):
        '''simple docstring'''
        return self._num_examples
    @property
    def epochs_completed( self ):
        '''simple docstring'''
        return self._epochs_completed
    def next_batch( self , batch_size , fake_data=False , shuffle=True ):
        '''simple docstring'''
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size )],
                [fake_label for _ in range(batch_size )],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perma = numpy.arange(self._num_examples )
            numpy.random.shuffle(perma )
            self._images = self.images[perma]
            self._labels = self.labels[perma]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples )
                numpy.random.shuffle(perm )
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part) , axis=0 ),
                numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
@deprecated(None , """Please write your own downloading logic.""" )
def _maybe_download ( filename , work_directory , source_url ):
    if not gfile.Exists(work_directory ):
        gfile.MakeDirs(work_directory )
    filepath = os.path.join(work_directory , filename )
    if not gfile.Exists(filepath ):
        urllib.request.urlretrieve(source_url , filepath )  # noqa: S310
        with gfile.GFile(filepath ) as f:
            size = f.size()
        print("""Successfully downloaded""" , filename , size , """bytes.""" )
    return filepath
@deprecated(
    None , """Please use alternatives such as:""" """ tensorflow_datasets.load('mnist')""" )
def read_data_sets ( train_dir , fake_data=False , one_hot=False , dtype=dtypes.float32 , reshape=True , validation_size=5_0_0_0 , seed=None , source_url=DEFAULT_SOURCE_URL , ):
    if fake_data:
        def fake():
            return _DataSet(
                [] , [] , fake_data=True , one_hot=one_hot , dtype=dtype , seed=seed )
        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train , validation=validation , test=test )
    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL
    train_images_file = """train-images-idx3-ubyte.gz"""
    train_labels_file = """train-labels-idx1-ubyte.gz"""
    test_images_file = """t10k-images-idx3-ubyte.gz"""
    test_labels_file = """t10k-labels-idx1-ubyte.gz"""
    local_file = _maybe_download(
        train_images_file , train_dir , source_url + train_images_file )
    with gfile.Open(local_file , """rb""" ) as f:
        train_images = _extract_images(f )
    local_file = _maybe_download(
        train_labels_file , train_dir , source_url + train_labels_file )
    with gfile.Open(local_file , """rb""" ) as f:
        train_labels = _extract_labels(f , one_hot=one_hot )
    local_file = _maybe_download(
        test_images_file , train_dir , source_url + test_images_file )
    with gfile.Open(local_file , """rb""" ) as f:
        test_images = _extract_images(f )
    local_file = _maybe_download(
        test_labels_file , train_dir , source_url + test_labels_file )
    with gfile.Open(local_file , """rb""" ) as f:
        test_labels = _extract_labels(f , one_hot=one_hot )
    if not 0 <= validation_size <= len(train_images ):
        msg = (
            """Validation size should be between 0 and """
            F'''{len(train_images )}. Received: {validation_size}.'''
        )
        raise ValueError(msg )
    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]
    options = {"""dtype""": dtype, """reshape""": reshape, """seed""": seed}
    train = _DataSet(train_images , train_labels , **options )
    validation = _DataSet(validation_images , validation_labels , **options )
    test = _DataSet(test_images , test_labels , **options )
    return _Datasets(train=train , validation=validation , test=test )
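# A minimal usage sketch (left as a comment because calling it downloads the
# MNIST files on first use; the directory name is illustrative):
#
#   mnist = read_data_sets("/tmp/mnist_data" , one_hot=True )
#   batch_xs , batch_ys = mnist.train.next_batch(100 )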
| 716
|
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a = logging.get_logger(__name__)
# TODO Update this
a = {
'facebook/esm-1b': 'https://huggingface.co/facebook/esm-1b/resolve/main/config.json',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class UpperCamelCase__ ( PretrainedConfig ):
    model_type = 'esm'
    def __init__( self , vocab_size=None , mask_token_id=None , pad_token_id=None , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1_026 , initializer_range=0.02 , layer_norm_eps=1e-12 , position_embedding_type="""absolute""" , use_cache=True , emb_layer_norm_before=None , token_dropout=False , is_folding_model=False , esmfold_config=None , vocab_list=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , mask_token_id=mask_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("""No esmfold_config supplied for folding model, using default values.""" )
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config , dict ):
                esmfold_config = EsmFoldConfig(**esmfold_config )
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("""No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!""" )
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config , """use_esm_attn_map""" , False ):
            raise ValueError("""The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!""" )
    def to_dict( self ):
        '''simple docstring'''
        output = super().to_dict()
        if isinstance(self.esmfold_config , EsmFoldConfig ):
            output["""esmfold_config"""] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig :
    esm_type : str = None
    fp16_esm : bool = True
    use_esm_attn_map : bool = False
    esm_ablate_pairwise : bool = False
    esm_ablate_sequence : bool = False
    esm_input_dropout : float = 0
    embed_aa : bool = True
    bypass_lm : bool = False
    lddt_head_hid_dim : int = 128
    trunk : "TrunkConfig" = None
    def __post_init__( self ):
        '''simple docstring'''
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk , dict ):
            self.trunk = TrunkConfig(**self.trunk )
    def to_dict( self ):
        '''simple docstring'''
        output = asdict(self )
        output["""trunk"""] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig :
    num_blocks : int = 48
    sequence_state_dim : int = 1024
    pairwise_state_dim : int = 128
    sequence_head_width : int = 32
    pairwise_head_width : int = 32
    position_bins : int = 32
    dropout : float = 0
    layer_drop : float = 0
    cpu_grad_checkpoint : bool = False
    max_recycles : int = 4
    chunk_size : Optional[int] = 128
    structure_module : "StructureModuleConfig" = None
    def __post_init__( self ):
        '''simple docstring'''
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module , dict ):
            self.structure_module = StructureModuleConfig(**self.structure_module )
        if self.max_recycles <= 0:
            raise ValueError(F'''`max_recycles` should be positive, got {self.max_recycles}.''' )
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                """`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"""
                F''' {self.sequence_state_dim} and {self.sequence_head_width}.''' )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                """`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"""
                F''' {self.pairwise_state_dim} and {self.pairwise_head_width}.''' )
        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                """`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"""
                F''' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.''' )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                """`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"""
                F''' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.''' )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(F'''`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.''' )
        if self.dropout >= 0.4:
            raise ValueError(F'''`dropout` should not be greater than 0.4, got {self.dropout}.''' )
    def to_dict( self ):
        '''simple docstring'''
        output = asdict(self )
        output["""structure_module"""] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig :
    sequence_dim : int = 384
    pairwise_dim : int = 128
    ipa_dim : int = 16
    resnet_dim : int = 128
    num_heads_ipa : int = 12
    num_qk_points : int = 4
    num_v_points : int = 8
    dropout_rate : float = 0.1
    num_blocks : int = 8
    num_transition_layers : int = 1
    num_resnet_blocks : int = 2
    num_angles : int = 7
    trans_scale_factor : int = 10
    epsilon : float = 1e-8
    inf : float = 1e5
    def to_dict( self ):
        '''simple docstring'''
        return asdict(self )
def get_default_vocab_list ( ):
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
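# A minimal construction sketch for the folding configs above (argument values
# are illustrative, not taken from any released checkpoint):
#
#   esmfold_config = EsmFoldConfig(trunk=TrunkConfig(num_blocks=48 ) )
#   print(esmfold_config.to_dict()["trunk"]["num_blocks"] )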
| 650
| 0
|
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
a = logging.get_logger(__name__)
def get_resize_output_image_size ( input_image , output_size , keep_aspect_ratio , multiple ):
    def constraint_to_multiple_of(val , multiple , min_val=0 , max_val=None ):
        x = round(val / multiple ) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple ) * multiple
        if x < min_val:
            x = math.ceil(val / multiple ) * multiple
        return x
    output_size = (output_size, output_size) if isinstance(output_size , int ) else output_size
    input_height , input_width = get_image_size(input_image )
    output_height , output_width = output_size
    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width
    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width ) < abs(1 - scale_height ):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height
    new_height = constraint_to_multiple_of(scale_height * input_height , multiple=multiple )
    new_width = constraint_to_multiple_of(scale_width * input_width , multiple=multiple )
    return (new_height, new_width)
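# Worked example for the helper above: for a 480x640 input, a 384x384 target,
# keep_aspect_ratio=True and multiple=32, the scales are 384/480 = 0.8 (height)
# and 384/640 = 0.6 (width); |1 - 0.6| > |1 - 0.8|, so the height scale 0.8 is
# applied to both sides, giving (384, 512) after rounding to multiples of 32.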
class UpperCamelCase__ ( BaseImageProcessor ):
__SCREAMING_SNAKE_CASE : Any = ['pixel_values']
    def __init__( self , do_resize = True , size = None , resample = PILImageResampling.BILINEAR , keep_aspect_ratio = False , ensure_multiple_of = 1 , do_rescale = True , rescale_factor = 1 / 255 , do_normalize = True , image_mean = None , image_std = None , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        size = size if size is not None else {'height': 384, 'width': 384}
        size = get_size_dict(size )
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize( self , image , size , keep_aspect_ratio = False , ensure_multiple_of = 1 , resample = PILImageResampling.BICUBIC , data_format = None , **kwargs , ):
        '''simple docstring'''
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
        output_size = get_resize_output_image_size(
            image , output_size=(size["""height"""], size["""width"""]) , keep_aspect_ratio=keep_aspect_ratio , multiple=ensure_multiple_of , )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def rescale( self , image , scale , data_format = None , **kwargs , ):
        '''simple docstring'''
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image , mean , std , data_format = None , **kwargs , ):
        '''simple docstring'''
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self , images , do_resize = None , size = None , keep_aspect_ratio = None , ensure_multiple_of = None , resample = None , do_rescale = None , rescale_factor = None , do_normalize = None , image_mean = None , image_std = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ):
        '''simple docstring'''
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size )
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and (size is None or resample is None):
            raise ValueError("""Size and resample must be specified if do_resize is True.""" )
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data , tensor_type=return_tensors )
    def post_process_semantic_segmentation( self , outputs , target_sizes = None ):
        '''simple docstring'''
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits ) != len(target_sizes ):
                raise ValueError(
                    """Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
            if is_torch_tensor(target_sizes ):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits ) ):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=False )
                semantic_map = resized_logits[0].argmax(dim=0 )
                semantic_segmentation.append(semantic_map )
        else:
            semantic_segmentation = logits.argmax(dim=1 )
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
        return semantic_segmentation
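# A minimal usage sketch for the processor above (hedged: the class name is
# obfuscated in this file; in the upstream library this corresponds to a
# DPT-style image processor):
#
#   processor = DPTImageProcessor(size={"height": 384, "width": 384} )
#   inputs = processor(images=image , return_tensors="pt" )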
| 717
|
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser ( subparsers=None ):
    if subparsers is not None:
        parser = subparsers.add_parser("""env""" )
    else:
        parser = argparse.ArgumentParser("""Accelerate env command""" )
    parser.add_argument(
        """--config_file""" , default=None , help="""The config file to use for the default values in the launching script.""" )
    if subparsers is not None:
        parser.set_defaults(func=env_command )
    return parser
def env_command ( args ):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()
    accelerate_config = """Not found"""
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file ):
        accelerate_config = load_config_from_file(args.config_file ).to_dict()
    info = {
        """`Accelerate` version""": version,
        """Platform""": platform.platform(),
        """Python version""": platform.python_version(),
        """Numpy version""": np.__version__,
        """PyTorch version (GPU?)""": F'''{pt_version} ({pt_cuda_available})''',
        """PyTorch XPU available""": str(pt_xpu_available ),
        """PyTorch NPU available""": str(pt_npu_available ),
        """System RAM""": F'''{psutil.virtual_memory().total / 1_0_2_4 ** 3:.2f} GB''',
    }
    if pt_cuda_available:
        info["""GPU type"""] = torch.cuda.get_device_name()
    print("""\nCopy-and-paste the text below in your GitHub issue\n""" )
    print("""\n""".join([F'''- {prop}: {val}''' for prop, val in info.items()] ) )
    print("""- `Accelerate` default config:""" if args.config_file is None else """- `Accelerate` config passed:""" )
    accelerate_config_str = (
        """\n""".join([F'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] )
        if isinstance(accelerate_config , dict )
        else F'''\t{accelerate_config}'''
    )
    print(accelerate_config_str )
    info["""`Accelerate` configs"""] = accelerate_config
    return info
def main ( ):
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args )
    return 0
if __name__ == "__main__":
raise SystemExit(main())
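# Typical invocations (the report contents depend on the machine):
#
#   accelerate env
#   accelerate env --config_file ~/.cache/huggingface/accelerate/default_config.yaml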
| 650
| 0
|
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('9.1.0'):
a = {
'linear': PIL.Image.Resampling.BILINEAR,
'bilinear': PIL.Image.Resampling.BILINEAR,
'bicubic': PIL.Image.Resampling.BICUBIC,
'lanczos': PIL.Image.Resampling.LANCZOS,
'nearest': PIL.Image.Resampling.NEAREST,
}
else:
a = {
'linear': PIL.Image.LINEAR,
'bilinear': PIL.Image.BILINEAR,
'bicubic': PIL.Image.BICUBIC,
'lanczos': PIL.Image.LANCZOS,
'nearest': PIL.Image.NEAREST,
}
def pt_to_pil ( images ):
    images = (images / 2 + 0.5).clamp(0 , 1 )
    images = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
    images = numpy_to_pil(images )
    return images
def numpy_to_pil ( images ):
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 2_5_5).round().astype("""uint8""" )
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze() , mode="""L""" ) for image in images]
    else:
        pil_images = [Image.fromarray(image ) for image in images]
    return pil_images
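# Example round-trip for the helpers above (a sketch; assumes torch is
# available even though this module only imports PIL):
#
#   import torch
#   latents = torch.rand(1 , 3 , 64 , 64 ) * 2 - 1  # values in [-1, 1]
#   pil_images = pt_to_pil(latents )
#   pil_images[0].size  # (64, 64)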
| 718
|
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class DeiTModelTester :
    def __init__( self , parent , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="""gelu""" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , num_labels=3 , scope=None , encoder_stride=2 , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        '''simple docstring'''
return DeiTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
    def create_and_check_model( self , config , pixel_values , labels ):
        '''simple docstring'''
        model = DeiTModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_image_modeling( self , config , pixel_values , labels ):
        '''simple docstring'''
        model = DeiTForMaskedImageModeling(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
        # test greyscale images
        config.num_channels = 1
        model = DeiTForMaskedImageModeling(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        '''simple docstring'''
        config.num_labels = self.type_sequence_label_size
        model = DeiTForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = DeiTForImageClassification(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config , pixel_values , labels = self.prepare_config_and_inputs()
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class UpperCamelCase__ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': DeiTModel,
'image-classification': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = DeiTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DeiTConfig , has_text_modality=False , hidden_size=37 )
    def test_config( self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    @unittest.skip(reason="""DeiT does not use inputs_embeds""" )
    def test_inputs_embeds( self ):
        '''simple docstring'''
        pass
    def test_model_common_attributes( self ):
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def test_forward_signature( self ):
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_masked_image_modeling( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs )
    def test_for_image_classification( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        '''simple docstring'''
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict
    def test_training( self ):
        '''simple docstring'''
        if not self.model_tester.is_training:
            return
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            # DeiTForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING )
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config )
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
    def test_training_gradient_checkpointing( self ):
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return
        config.use_cache = False
        config.return_dict = True
        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING ) or not model_class.supports_gradient_checkpointing:
                continue
            # DeiTForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                continue
            model = model_class(config )
            model.gradient_checkpointing_enable()
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
    def test_problem_types( self ):
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        problem_types = [
            {"""title""": """multi_label_classification""", """num_labels""": 2, """dtype""": torch.float},
            {"""title""": """single_label_classification""", """num_labels""": 1, """dtype""": torch.long},
            {"""title""": """regression""", """num_labels""": 1, """dtype""": torch.float},
        ]
        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING ),
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING ),
                ]
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            for problem_type in problem_types:
                with self.subTest(msg=F'''Testing {model_class} with {problem_type["title"]}''' ):
                    config.problem_type = problem_type["""title"""]
                    config.num_labels = problem_type["""num_labels"""]
                    model = model_class(config )
                    model.to(torch_device )
                    model.train()
                    inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
                    if problem_type["""num_labels"""] > 1:
                        inputs["""labels"""] = inputs["""labels"""].unsqueeze(1 ).repeat(1 , problem_type["""num_labels"""] )
                    inputs["""labels"""] = inputs["""labels"""].to(problem_type["""dtype"""] )
                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom something is wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True ) as warning_list:
                        loss = model(**inputs ).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message ):
                            raise ValueError(
                                F'''Something is going wrong in the regression problem: intercepted {w.message}''' )
                    loss.backward()
@slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DeiTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img ( ):
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class UpperCamelCase__ ( unittest.TestCase ):
@cached_property
    def default_image_processor( self ):
'''simple docstring'''
return (
DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
if is_vision_available()
else None
)
@slow
    def test_inference_image_classification_head( self ):
        '''simple docstring'''
        model = DeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" ).to(
            torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-1.0_266, 0.1_912, -1.2_861] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
    def test_inference_fp16( self ):
        '''simple docstring'''
        model = DeiTModel.from_pretrained(
            """facebook/deit-base-distilled-patch16-224""" , torch_dtype=torch.float16 , device_map="""auto""" )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" )
        pixel_values = inputs.pixel_values.to(torch_device )
        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            outputs = model(pixel_values )
| 650
| 0
|
def sum_of_series ( first_term , common_diff , num_of_terms ):
    # formula for the sum of an arithmetic series
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    return total
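# Worked example for the arithmetic-series formula S = n/2 * (2a + (n - 1) * d):
# with first_term=1, common_diff=1 and num_of_terms=10 the sum is
# 10/2 * (2 * 1 + 9 * 1) = 55.0, which is what the demo below prints.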
def UpperCAmelCase_ ( ):
print(sum_of_series(1 , 1 , 1_0 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 719
|
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
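# A minimal usage sketch for the pipeline exported above (model ids are
# illustrative, and loading them requires a network connection):
#
#   from diffusers import ControlNetModel, StableDiffusionControlNetPipeline
#   controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
#   pipe = StableDiffusionControlNetPipeline.from_pretrained(
#       "runwayml/stable-diffusion-v1-5" , controlnet=controlnet )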
| 650
| 0
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 720
|
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
a = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class UpperCamelCase__ ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self ):
        '''simple docstring'''
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(a , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id( self ):
        '''simple docstring'''
        token = """<pad>"""
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab( self ):
        '''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , """<s>""" )
        self.assertEqual(vocab_keys[1] , """<pad>""" )
        self.assertEqual(len(vocab_keys ) , 1_008 )
    def test_vocab_size( self ):
        '''simple docstring'''
        self.assertEqual(self.get_tokenizer().vocab_size , 1_008 )
    def test_full_tokenizer( self ):
        '''simple docstring'''
        tokenizer = XGLMTokenizer(a , keep_accents=True )
        tokens = tokenizer.tokenize("""This is a test""" )
        self.assertListEqual(tokens , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
        tokens = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
    @cached_property
    def big_tokenizer( self ):
        '''simple docstring'''
        return XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
    def test_picklable_without_disk( self ):
        '''simple docstring'''
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(a , f.name )
            tokenizer = XGLMTokenizer(f.name , keep_accents=True )
            pickled_tokenizer = pickle.dumps(tokenizer )
        pickle.loads(pickled_tokenizer )
    def test_rust_and_python_full_tokenizers( self ):
        '''simple docstring'''
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = """I was born in 92000, and this is falsé."""
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
@slow
    def test_tokenization_base_easy_symbols( self ):
        '''simple docstring'''
        text = """Hello World!"""
        lowercase_ = [2, 31_227, 4_447, 35]
        self.assertListEqual(lowercase_ , self.big_tokenizer.encode(text ) )
@slow
    def test_tokenization_base_hard_symbols( self ):
'''simple docstring'''
        text = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"""
)
# fmt: off
lowercase_ = [2, 1_018, 67, 11, 1_988, 2_617, 5_631, 278, 11, 3_407, 48, 71_630, 28_085, 4, 3_234, 157, 13, 6, 5, 6, 4, 3_526, 768, 15, 659, 57, 298, 3_983, 864, 129, 21, 6, 5, 13_675, 377, 652, 7_580, 10_341, 155, 2_817, 422, 1_666, 7, 1_674, 53, 113, 202_277, 17_892, 33, 60, 87, 4, 3_234, 157, 61, 2_667, 52_376, 19, 88, 23, 735]
# fmt: on
        self.assertListEqual(lowercase_ , self.big_tokenizer.encode(text ) )
@slow
    def test_tokenizer_integration( self ):
'''simple docstring'''
        expected_encoding = {
"""input_ids""": [[2, 108_825, 1_163, 15, 88_010, 473, 15_898, 157, 13_672, 1_857, 312, 8, 238_021, 1_163, 53, 13_672, 1_857, 312, 8, 53_283, 182_396, 8, 18_566, 16, 36_733, 4_101, 8, 230, 244_017, 122_553, 7, 15, 132_597, 4, 293, 12_511, 7_610, 4, 3_414, 132_597, 9, 4, 32_361, 362, 4, 734, 28_512, 32_569, 18, 4, 32_361, 26_096, 14_982, 73, 18_715, 21_433, 235_261, 15, 492, 12_427, 16, 53, 18_715, 21_433, 65_454, 15, 23_659, 563, 16, 278, 597, 2_843, 595, 7_931, 182_396, 64_186, 22, 886, 595, 132_981, 53, 25_540, 3_449, 43_982, 39_901, 5_951, 878, 330, 4, 27_694, 80_269, 312, 53, 6_517, 11_780, 611, 20_408, 5], [2, 6, 132_597, 67, 42_897, 33, 592, 8, 163_729, 25_540, 361, 136_997, 109_514, 173_230, 7, 501, 60, 102_913, 196, 5_631, 235, 63_243, 473, 6, 231_757, 74, 5_277, 7_905, 53, 3_095, 37_317, 22, 454, 183_874, 5], [2, 268, 31_298, 46_530, 6, 132_935, 43_831, 7, 597, 32, 24, 3_688, 9_865, 5]],
"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding , model_name="""facebook/xglm-564M""" , padding=False , )
| 650
| 0
|
'''simple docstring'''
from functools import reduce
N = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def solution ( n = N ):
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x , y : str(int(x ) * int(y ) ) , n[i : i + 1_3] ) )
        for i in range(len(n ) - 1_2 ) )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 721
|
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
a = None
a = logging.get_logger(__name__)
a = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
a = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
},
'tokenizer_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/tokenizer.json',
't5-base': 'https://huggingface.co/t5-base/resolve/main/tokenizer.json',
't5-large': 'https://huggingface.co/t5-large/resolve/main/tokenizer.json',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/tokenizer.json',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/tokenizer.json',
},
}
# TODO(PVP) - this should be removed in Transformers v5
a = {
't5-small': 5_1_2,
't5-base': 5_1_2,
't5-large': 5_1_2,
't5-3b': 5_1_2,
't5-11b': 5_1_2,
}
class TaTokenizerFast ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = TaTokenizer
    prefix_tokens : List[int] = []
    def __init__( self , vocab_file=None , tokenizer_file=None , eos_token="""</s>""" , unk_token="""<unk>""" , pad_token="""<pad>""" , extra_ids=100 , additional_special_tokens=None , **kwargs , ):
        '''simple docstring'''
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [F'''<extra_id_{i}>''' for i in range(extra_ids )]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            extra_tokens = len(set(filter(lambda x : bool("""extra_id_""" in str(x ) ) , additional_special_tokens ) ) )
            if extra_tokens != extra_ids:
                raise ValueError(
                    F'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
                    """ provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"""
                    """ tokens""" )
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , extra_ids=extra_ids , additional_special_tokens=additional_special_tokens , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        self.extra_ids = extra_ids
    @staticmethod
    def _eventually_correct_t5_max_length( pretrained_model_name_or_path , max_model_length , init_max_model_length ):
        '''simple docstring'''
        if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
            deprecated_max_model_length = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
"""This tokenizer was incorrectly instantiated with a model max length of"""
F''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
""" behavior is kept to avoid breaking backwards compatibility when padding/encoding with"""
""" `truncation is True`.\n- Be aware that you SHOULD NOT rely on"""
F''' {pretrained_model_name_or_path} automatically truncating your input to'''
F''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
F''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
""" `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"""
""" instantiate this tokenizer with `model_max_length` set to your preferred value.""" , UpperCamelCase__ , )
return max_model_length
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        '''simple docstring'''
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
                """tokenizer.""" )
        if not os.path.isdir(save_directory ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
            logger.info(F'''Copy vocab file to {out_vocab_file}''' )
        return (out_vocab_file,)
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b = None ):
        '''simple docstring'''
        token_ids_a = token_ids_a + [self.eos_token_id]
        if token_ids_b is None:
            return self.prefix_tokens + token_ids_a
        else:
            token_ids_b = token_ids_b + [self.eos_token_id]
            return self.prefix_tokens + token_ids_a + token_ids_b
    def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_b = None ):
        '''simple docstring'''
        eos = [self.eos_token_id]
        if token_ids_b is None:
            return len(token_ids_a + eos ) * [0]
        return len(token_ids_a + eos + token_ids_b + eos ) * [0]
    def get_sentinel_tokens( self ):
        '''simple docstring'''
        return list(
            set(filter(lambda token : bool(re.search(R"""<extra_id_\d+>""" , token ) ) is not None , self.additional_special_tokens ) ) )
    def get_sentinel_token_ids( self ):
        '''simple docstring'''
        return [self.convert_tokens_to_ids(token ) for token in self.get_sentinel_tokens()]
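# A minimal sketch of the sentinel-token helpers above (the ids shown are
# illustrative and depend on the vocabulary; loading requires a network
# connection):
#
#   tok = TaTokenizerFast.from_pretrained("t5-small")
#   tok.get_sentinel_tokens()[:2]      # e.g. ["<extra_id_0>", "<extra_id_1>"]
#   tok.get_sentinel_token_ids()[:2]   # e.g. [32099, 32098]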
| 650
| 0
|
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
a = """Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"""
def UpperCAmelCase_ ( ):
lowercase_ = _ask_options(
"""In which compute environment are you running?""" , ["""This machine""", """AWS (Amazon SageMaker)"""] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
lowercase_ = get_sagemaker_input()
else:
lowercase_ = get_cluster_input()
return config
def UpperCAmelCase_ ( UpperCAmelCase__=None ):
if subparsers is not None:
lowercase_ = subparsers.add_parser("""config""" , description=_lowerCamelCase )
else:
lowercase_ = argparse.ArgumentParser("""Accelerate config command""" , description=_lowerCamelCase )
parser.add_argument(
"""--config_file""" , default=_lowerCamelCase , help=(
"""The path to use to store the config file. Will default to a file named default_config.yaml in the cache """
"""location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have """
"""such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed """
"""with \'huggingface\'."""
) , )
if subparsers is not None:
parser.set_defaults(func=_lowerCamelCase )
return parser
def UpperCAmelCase_ ( UpperCAmelCase__ ):
lowercase_ = get_user_input()
if args.config_file is not None:
lowercase_ = args.config_file
else:
if not os.path.isdir(_lowerCamelCase ):
os.makedirs(_lowerCamelCase )
lowercase_ = default_yaml_config_file
if config_file.endswith(""".json""" ):
config.to_json_file(_lowerCamelCase )
else:
config.to_yaml_file(_lowerCamelCase )
print(F'''accelerate configuration saved at {config_file}''' )
def UpperCAmelCase_ ( ):
lowercase_ = config_command_parser()
lowercase_ = parser.parse_args()
config_command(_lowerCamelCase )
if __name__ == "__main__":
main()
| 700
|
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionDiffEditPipeline
__SCREAMING_SNAKE_CASE : Optional[Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'height', 'width', 'image'} | {'image_latents'}
__SCREAMING_SNAKE_CASE : List[str] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'image'} | {'image_latents'}
__SCREAMING_SNAKE_CASE : int = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__SCREAMING_SNAKE_CASE : Any = frozenset([] )
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=UpperCamelCase__ , )
lowercase_ = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=UpperCamelCase__ , set_alpha_to_one=UpperCamelCase__ , )
lowercase_ = DDIMInverseScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=UpperCamelCase__ , set_alpha_to_zero=UpperCamelCase__ , )
torch.manual_seed(0 )
lowercase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowercase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="""gelu""" , projection_dim=512 , )
lowercase_ = CLIPTextModel(UpperCamelCase__ )
lowercase_ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowercase_ = {
"""unet""": unet,
"""scheduler""": scheduler,
"""inverse_scheduler""": inverse_scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def UpperCAmelCase__ ( self : int , UpperCamelCase__ : Any , UpperCamelCase__ : Any=0 ):
'''simple docstring'''
lowercase_ = floats_tensor((1, 16, 16) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
lowercase_ = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
if str(UpperCamelCase__ ).startswith("""mps""" ):
lowercase_ = torch.manual_seed(UpperCamelCase__ )
else:
lowercase_ = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
lowercase_ = {
"""prompt""": """a dog and a newt""",
"""mask_image""": mask,
"""image_latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 2,
"""inpaint_strength""": 1.0,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def UpperCAmelCase__ ( self : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : str=0 ):
'''simple docstring'''
lowercase_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
lowercase_ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowercase_ = Image.fromarray(np.uinta(UpperCamelCase__ ) ).convert("""RGB""" )
if str(UpperCamelCase__ ).startswith("""mps""" ):
lowercase_ = torch.manual_seed(UpperCamelCase__ )
else:
lowercase_ = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
lowercase_ = {
"""image""": image,
"""source_prompt""": """a cat and a frog""",
"""target_prompt""": """a dog and a newt""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""num_maps_per_mask""": 2,
"""mask_encode_strength""": 1.0,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def UpperCAmelCase__ ( self : List[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple=0 ):
'''simple docstring'''
lowercase_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
lowercase_ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowercase_ = Image.fromarray(np.uinta(UpperCamelCase__ ) ).convert("""RGB""" )
if str(UpperCamelCase__ ).startswith("""mps""" ):
lowercase_ = torch.manual_seed(UpperCamelCase__ )
else:
lowercase_ = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
lowercase_ = {
"""image""": image,
"""prompt""": """a cat and a frog""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""inpaint_strength""": 1.0,
"""guidance_scale""": 6.0,
"""decode_latents""": True,
"""output_type""": """numpy""",
}
return inputs
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
if not hasattr(self.pipeline_class , """_optional_components""" ):
return
lowercase_ = self.get_dummy_components()
lowercase_ = self.pipeline_class(**UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
lowercase_ = self.get_dummy_inputs(UpperCamelCase__ )
lowercase_ = pipe(**UpperCamelCase__ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(UpperCamelCase__ )
lowercase_ = self.pipeline_class.from_pretrained(UpperCamelCase__ )
pipe_loaded.to(UpperCamelCase__ )
pipe_loaded.set_progress_bar_config(disable=UpperCamelCase__ )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(UpperCamelCase__ , UpperCamelCase__ ) is None , F'''`{optional_component}` did not stay set to None after loading.''' , )
lowercase_ = self.get_dummy_inputs(UpperCamelCase__ )
lowercase_ = pipe_loaded(**UpperCamelCase__ )[0]
lowercase_ = np.abs(output - output_loaded ).max()
self.assertLess(UpperCamelCase__ , 1e-4 )
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ = """cpu"""
lowercase_ = self.get_dummy_components()
lowercase_ = self.pipeline_class(**UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowercase_ = self.get_dummy_mask_inputs(UpperCamelCase__ )
lowercase_ = pipe.generate_mask(**UpperCamelCase__ )
lowercase_ = mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16) )
lowercase_ = np.array([0] * 9 )
lowercase_ = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCamelCase__ , 1e-3 )
self.assertEqual(mask[0, -3, -4] , 0 )
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
lowercase_ = """cpu"""
lowercase_ = self.get_dummy_components()
lowercase_ = self.pipeline_class(**UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowercase_ = self.get_dummy_inversion_inputs(UpperCamelCase__ )
lowercase_ = pipe.invert(**UpperCamelCase__ ).images
lowercase_ = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
lowercase_ = np.array(
[0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , )
lowercase_ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCamelCase__ , 1e-3 )
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=5e-3 )
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ = """cpu"""
lowercase_ = self.get_dummy_components()
lowercase_ = {"""beta_start""": 0.00_085, """beta_end""": 0.012, """beta_schedule""": """scaled_linear"""}
lowercase_ = DPMSolverMultistepScheduler(**UpperCamelCase__ )
lowercase_ = DPMSolverMultistepInverseScheduler(**UpperCamelCase__ )
lowercase_ = self.pipeline_class(**UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowercase_ = self.get_dummy_inversion_inputs(UpperCamelCase__ )
lowercase_ = pipe.invert(**UpperCamelCase__ ).images
lowercase_ = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
lowercase_ = np.array(
[0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , )
lowercase_ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCamelCase__ , 1e-3 )
@require_torch_gpu
@slow
class UpperCamelCase__ ( unittest.TestCase ):
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def UpperCAmelCase__ ( cls : Dict ):
'''simple docstring'''
lowercase_ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png""" )
lowercase_ = raw_image.convert("""RGB""" ).resize((768, 768) )
lowercase_ = raw_image
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
lowercase_ = torch.manual_seed(0 )
lowercase_ = StableDiffusionDiffEditPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2-1""" , safety_checker=UpperCamelCase__ , torch_dtype=torch.floataa )
lowercase_ = DDIMScheduler.from_config(pipe.scheduler.config )
lowercase_ = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowercase_ = """a bowl of fruit"""
lowercase_ = """a bowl of pears"""
lowercase_ = pipe.generate_mask(
image=self.raw_image , source_prompt=UpperCamelCase__ , target_prompt=UpperCamelCase__ , generator=UpperCamelCase__ , )
lowercase_ = pipe.invert(
prompt=UpperCamelCase__ , image=self.raw_image , inpaint_strength=0.7 , generator=UpperCamelCase__ ).latents
lowercase_ = pipe(
prompt=UpperCamelCase__ , mask_image=UpperCamelCase__ , image_latents=UpperCamelCase__ , generator=UpperCamelCase__ , negative_prompt=UpperCamelCase__ , inpaint_strength=0.7 , output_type="""numpy""" , ).images[0]
lowercase_ = (
np.array(
load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/diffedit/pears.png""" ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5e-1
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
lowercase_ = torch.manual_seed(0 )
lowercase_ = StableDiffusionDiffEditPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2-1""" , safety_checker=UpperCamelCase__ , torch_dtype=torch.floataa )
lowercase_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
lowercase_ = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowercase_ = """a bowl of fruit"""
lowercase_ = """a bowl of pears"""
lowercase_ = pipe.generate_mask(
image=self.raw_image , source_prompt=UpperCamelCase__ , target_prompt=UpperCamelCase__ , generator=UpperCamelCase__ , )
lowercase_ = pipe.invert(
prompt=UpperCamelCase__ , image=self.raw_image , inpaint_strength=0.7 , generator=UpperCamelCase__ , num_inference_steps=25 , ).latents
lowercase_ = pipe(
prompt=UpperCamelCase__ , mask_image=UpperCamelCase__ , image_latents=UpperCamelCase__ , generator=UpperCamelCase__ , negative_prompt=UpperCamelCase__ , inpaint_strength=0.7 , num_inference_steps=25 , output_type="""numpy""" , ).images[0]
lowercase_ = (
np.array(
load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/diffedit/pears.png""" ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5e-1
| 650
| 0
|
import copy
import random
from transformers import CLIPTokenizer
class UpperCamelCase__ ( snake_case_ ):
def __init__( self : List[Any] , *UpperCamelCase__ : int , **UpperCamelCase__ : List[Any] ):
'''simple docstring'''
super().__init__(*UpperCamelCase__ , **UpperCamelCase__ )
lowercase_ = {}
def UpperCAmelCase__ ( self : Optional[Any] , UpperCamelCase__ : int , *UpperCamelCase__ : Any , **UpperCamelCase__ : Tuple ):
'''simple docstring'''
lowercase_ = super().add_tokens(UpperCamelCase__ , *UpperCamelCase__ , **UpperCamelCase__ )
if num_added_tokens == 0:
raise ValueError(
F'''The tokenizer already contains the token {placeholder_token}. Please pass a different'''
""" `placeholder_token` that is not already in the tokenizer.""" )
def UpperCAmelCase__ ( self : Union[str, Any] , UpperCamelCase__ : Union[str, Any] , *UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[Any]=1 , **UpperCamelCase__ : Dict ):
'''simple docstring'''
lowercase_ = []
if num_vec_per_token == 1:
self.try_adding_tokens(UpperCamelCase__ , *UpperCamelCase__ , **UpperCamelCase__ )
output.append(UpperCamelCase__ )
else:
lowercase_ = []
for i in range(UpperCamelCase__ ):
lowercase_ = placeholder_token + F'''_{i}'''
self.try_adding_tokens(UpperCamelCase__ , *UpperCamelCase__ , **UpperCamelCase__ )
output.append(UpperCamelCase__ )
# handle cases where there is a new placeholder token that contains the current placeholder token but is larger
for token in self.token_map:
if token in placeholder_token:
raise ValueError(
F'''The tokenizer already has placeholder token {token} that can get confused with'''
F''' {placeholder_token}; keep placeholder tokens independent.''' )
lowercase_ = output
def UpperCAmelCase__ ( self : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any]=False , UpperCamelCase__ : Optional[Any]=1.0 ):
'''simple docstring'''
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
lowercase_ = []
for i in range(len(UpperCamelCase__ ) ):
output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=UpperCamelCase__ ) )
return output
for placeholder_token in self.token_map:
if placeholder_token in text:
lowercase_ = self.token_map[placeholder_token]
lowercase_ = tokens[: 1 + int(len(UpperCamelCase__ ) * prop_tokens_to_load )]
if vector_shuffle:
lowercase_ = copy.copy(UpperCamelCase__ )
random.shuffle(UpperCamelCase__ )
lowercase_ = text.replace(UpperCamelCase__ , """ """.join(UpperCamelCase__ ) )
return text
def __call__( self : Optional[Any] , UpperCamelCase__ : Optional[Any] , *UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any]=False , UpperCamelCase__ : Optional[int]=1.0 , **UpperCamelCase__ : List[Any] ):
'''simple docstring'''
return super().__call__(
self.replace_placeholder_tokens_in_text(
UpperCamelCase__ , vector_shuffle=UpperCamelCase__ , prop_tokens_to_load=UpperCamelCase__ ) , *UpperCamelCase__ , **UpperCamelCase__ , )
def UpperCAmelCase__ ( self : str , UpperCamelCase__ : Optional[Any] , *UpperCamelCase__ : int , UpperCamelCase__ : Optional[Any]=False , UpperCamelCase__ : Optional[int]=1.0 , **UpperCamelCase__ : int ):
'''simple docstring'''
return super().encode(
self.replace_placeholder_tokens_in_text(
UpperCamelCase__ , vector_shuffle=UpperCamelCase__ , prop_tokens_to_load=UpperCamelCase__ ) , *UpperCamelCase__ , **UpperCamelCase__ , )
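if __name__ == "__main__":
    # Minimal standalone sketch of the multi-vector naming scheme above: one placeholder
    # token expands into num_vec_per_token suffixed copies (values here are illustrative).
    placeholder_token, num_vec_per_token = "<cat-toy>", 3
    expanded = [placeholder_token + f"_{i}" for i in range(num_vec_per_token)]
    print(" ".join(expanded))  # <cat-toy>_0 <cat-toy>_1 <cat-toy>_2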
| 701
|
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
a = logging.get_logger(__name__)
class UpperCamelCase__ ( __magic_name__ ):
__SCREAMING_SNAKE_CASE : str = ['pixel_values']
def __init__( self : List[Any] , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[int, float] = 1 / 255 , UpperCamelCase__ : bool = True , UpperCamelCase__ : int = 8 , **UpperCamelCase__ : Optional[Any] , ):
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
lowercase_ = do_rescale
lowercase_ = rescale_factor
lowercase_ = do_pad
lowercase_ = pad_size
def UpperCAmelCase__ ( self : int , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : float , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
return rescale(UpperCamelCase__ , scale=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def UpperCAmelCase__ ( self : List[Any] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : int , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None ):
'''simple docstring'''
lowercase_ , lowercase_ = get_image_size(UpperCamelCase__ )
lowercase_ = (old_height // size + 1) * size - old_height
lowercase_ = (old_width // size + 1) * size - old_width
return pad(UpperCamelCase__ , ((0, pad_height), (0, pad_width)) , mode="""symmetric""" , data_format=UpperCamelCase__ )
def UpperCAmelCase__ ( self : Optional[int] , UpperCamelCase__ : ImageInput , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[float] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase__ : Dict , ):
'''simple docstring'''
lowercase_ = do_rescale if do_rescale is not None else self.do_rescale
lowercase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase_ = do_pad if do_pad is not None else self.do_pad
lowercase_ = pad_size if pad_size is not None else self.pad_size
lowercase_ = make_list_of_images(UpperCamelCase__ )
if not valid_images(UpperCamelCase__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
# All transformations expect numpy arrays.
lowercase_ = [to_numpy_array(UpperCamelCase__ ) for image in images]
if do_rescale:
lowercase_ = [self.rescale(image=UpperCamelCase__ , scale=UpperCamelCase__ ) for image in images]
if do_pad:
lowercase_ = [self.pad(UpperCamelCase__ , size=UpperCamelCase__ ) for image in images]
lowercase_ = [to_channel_dimension_format(UpperCamelCase__ , UpperCamelCase__ ) for image in images]
lowercase_ = {"""pixel_values""": images}
return BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ )
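if __name__ == "__main__":
    # Minimal sketch of the symmetric-padding arithmetic above: each spatial dimension
    # is grown to the next multiple of pad_size (the numbers here are illustrative).
    old_height, old_width, size = 30, 37, 8
    pad_height = (old_height // size + 1) * size - old_height
    pad_width = (old_width // size + 1) * size - old_width
    print(pad_height, pad_width)  # 2 3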
| 650
| 0
|
from __future__ import annotations
import numpy as np
from numpy import floataa
from numpy.typing import NDArray
def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , ):
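    # Jacobi iteration: repeatedly solve each row for its diagonal unknown using the
    # previous iterate's values; convergence is guaranteed when the coefficient matrix
    # is strictly diagonally dominant (validated below).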
lowercase_ = coefficient_matrix.shape
lowercase_ = constant_matrix.shape
if rowsa != colsa:
lowercase_ = F'''Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}'''
raise ValueError(SCREAMING_SNAKE_CASE_ )
if colsa != 1:
lowercase_ = F'''Constant matrix must be nx1 but received {rowsa}x{colsa}'''
raise ValueError(SCREAMING_SNAKE_CASE_ )
if rowsa != rowsa:
lowercase_ = (
"Coefficient and constant matrices dimensions must be nxn and nx1 but "
F'''received {rowsa}x{colsa} and {rowsa}x{colsa}'''
)
raise ValueError(SCREAMING_SNAKE_CASE_ )
if len(SCREAMING_SNAKE_CASE_ ) != rowsa:
lowercase_ = (
"Number of initial values must be equal to number of rows in coefficient "
F'''matrix but received {len(SCREAMING_SNAKE_CASE_ )} and {rowsa}'''
)
raise ValueError(SCREAMING_SNAKE_CASE_ )
if iterations <= 0:
raise ValueError("""Iterations must be at least 1""" )
lowercase_ = np.concatenate(
(coefficient_matrix, constant_matrix) , axis=1 )
lowercase_ = table.shape
strictly_diagonally_dominant(SCREAMING_SNAKE_CASE_ )
# Iterates the whole matrix for given number of times
for _ in range(SCREAMING_SNAKE_CASE_ ):
lowercase_ = []
for row in range(SCREAMING_SNAKE_CASE_ ):
lowercase_ = 0
for col in range(SCREAMING_SNAKE_CASE_ ):
if col == row:
lowercase_ = table[row][col]
elif col == cols - 1:
lowercase_ = table[row][col]
else:
temp += (-1) * table[row][col] * init_val[col]
lowercase_ = (temp + val) / denom
new_val.append(SCREAMING_SNAKE_CASE_ )
lowercase_ = new_val
return [float(SCREAMING_SNAKE_CASE_ ) for i in new_val]
def UpperCAmelCase_ ( UpperCAmelCase__ ):
lowercase_ = table.shape
lowercase_ = True
for i in range(0 , SCREAMING_SNAKE_CASE_ ):
lowercase_ = 0
for j in range(0 , cols - 1 ):
if i == j:
continue
else:
total += table[i][j]
if table[i][i] <= total:
raise ValueError("""Coefficient matrix is not strictly diagonally dominant""" )
return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
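    # Hand-rolled Jacobi sweep on a small strictly diagonally dominant system, matching
    # the per-row update above (example values are illustrative).
    A = np.array([[4.0, 1.0], [1.0, 3.0]])
    b = np.array([1.0, 2.0])
    x = np.zeros(2)
    for _ in range(25):
        x = (b - (A - np.diag(np.diag(A))) @ x) / np.diag(A)
    print(x)  # approaches the exact solution [1/11, 7/11]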
| 702
|
def UpperCAmelCase_ ( UpperCAmelCase__ ):
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
raise TypeError("""Input value must be an 'int' type""" )
lowercase_ = 0
while number:
position += 1
number >>= 1
return position
if __name__ == "__main__":
import doctest
doctest.testmod()
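    # The function above returns the 1-indexed position of the most significant set
    # bit, e.g. 0b1000 has its highest set bit at position 4.
    assert UpperCAmelCase_(0b1000) == 4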
| 650
| 0
|
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
a = logging.get_logger(__name__)
a = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
a = {
'vocab_file': {
'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json'
},
'merges_file': {
'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt'
},
}
a = {'allegro/herbert-base-cased': 5_1_4}
a = {}
class UpperCamelCase__ ( __magic_name__ ):
__SCREAMING_SNAKE_CASE : List[str] = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE : Dict = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE : List[Any] = PRETRAINED_INIT_CONFIGURATION
__SCREAMING_SNAKE_CASE : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE : str = HerbertTokenizer
def __init__( self : int , UpperCamelCase__ : Dict=None , UpperCamelCase__ : str=None , UpperCamelCase__ : str=None , UpperCamelCase__ : List[Any]="<s>" , UpperCamelCase__ : Union[str, Any]="<unk>" , UpperCamelCase__ : Union[str, Any]="<pad>" , UpperCamelCase__ : Union[str, Any]="<mask>" , UpperCamelCase__ : List[str]="</s>" , **UpperCamelCase__ : List[Any] , ):
'''simple docstring'''
super().__init__(
UpperCamelCase__ , UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , cls_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , **UpperCamelCase__ , )
def UpperCAmelCase__ ( self : List[Any] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
lowercase_ = [self.cls_token_id]
lowercase_ = [self.sep_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCAmelCase__ ( self : Optional[Any] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None , UpperCamelCase__ : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase__ , token_ids_a=UpperCamelCase__ , already_has_special_tokens=UpperCamelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(UpperCamelCase__ )) + [1]
return [1] + ([0] * len(UpperCamelCase__ )) + [1] + ([0] * len(UpperCamelCase__ )) + [1]
def UpperCAmelCase__ ( self : List[str] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
lowercase_ = [self.sep_token_id]
lowercase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase__ ( self : Optional[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ):
'''simple docstring'''
lowercase_ = self._tokenizer.model.save(UpperCamelCase__ , name=UpperCamelCase__ )
return tuple(UpperCamelCase__ )
| 703
|
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UpperCamelCase__ ( __magic_name__ , __magic_name__ , __magic_name__ ):
@register_to_config
def __init__( self : int , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : float , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : bool = False , ):
'''simple docstring'''
super().__init__()
lowercase_ = nn.Embedding(UpperCamelCase__ , UpperCamelCase__ )
lowercase_ = nn.Embedding(UpperCamelCase__ , UpperCamelCase__ )
lowercase_ = False
lowercase_ = nn.Dropout(p=UpperCamelCase__ )
lowercase_ = TaConfig(
vocab_size=UpperCamelCase__ , d_model=UpperCamelCase__ , num_heads=UpperCamelCase__ , d_kv=UpperCamelCase__ , d_ff=UpperCamelCase__ , dropout_rate=UpperCamelCase__ , feed_forward_proj=UpperCamelCase__ , is_decoder=UpperCamelCase__ , is_encoder_decoder=UpperCamelCase__ , )
lowercase_ = nn.ModuleList()
for lyr_num in range(UpperCamelCase__ ):
lowercase_ = TaBlock(UpperCamelCase__ )
self.encoders.append(UpperCamelCase__ )
lowercase_ = TaLayerNorm(UpperCamelCase__ )
lowercase_ = nn.Dropout(p=UpperCamelCase__ )
def UpperCAmelCase__ ( self : Optional[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : str ):
'''simple docstring'''
lowercase_ = self.token_embedder(UpperCamelCase__ )
lowercase_ = encoder_input_tokens.shape[1]
lowercase_ = torch.arange(UpperCamelCase__ , device=encoder_input_tokens.device )
x += self.position_encoding(UpperCamelCase__ )
lowercase_ = self.dropout_pre(UpperCamelCase__ )
# invert the attention mask
lowercase_ = encoder_input_tokens.size()
lowercase_ = self.get_extended_attention_mask(UpperCamelCase__ , UpperCamelCase__ )
for lyr in self.encoders:
lowercase_ = lyr(UpperCamelCase__ , UpperCamelCase__ )[0]
lowercase_ = self.layer_norm(UpperCamelCase__ )
return self.dropout_post(UpperCamelCase__ ), encoder_inputs_mask
| 650
| 0
|
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class UpperCamelCase__ :
def UpperCAmelCase__ ( self : Tuple , UpperCamelCase__ : List[Any] ):
'''simple docstring'''
raise NotImplementedError()
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
raise NotImplementedError()
class UpperCamelCase__ ( __A ):
def __init__( self : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : Tuple = False , **UpperCamelCase__ : List[Any] ):
'''simple docstring'''
lowercase_ = tokenizer
lowercase_ = skip_prompt
lowercase_ = decode_kwargs
# variables used in the streaming process
lowercase_ = []
lowercase_ = 0
lowercase_ = True
def UpperCAmelCase__ ( self : Tuple , UpperCamelCase__ : Union[str, Any] ):
'''simple docstring'''
if len(value.shape ) > 1 and value.shape[0] > 1:
raise ValueError("""TextStreamer only supports batch size 1""" )
elif len(value.shape ) > 1:
lowercase_ = value[0]
if self.skip_prompt and self.next_tokens_are_prompt:
lowercase_ = False
return
# Add the new token to the cache and decode the entire thing.
self.token_cache.extend(value.tolist() )
lowercase_ = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
# After the symbol for a new line, we flush the cache.
if text.endswith("""\n""" ):
lowercase_ = text[self.print_len :]
lowercase_ = []
lowercase_ = 0
# If the last token is a CJK character, we print the characters.
elif len(UpperCamelCase__ ) > 0 and self._is_chinese_char(ord(text[-1] ) ):
lowercase_ = text[self.print_len :]
self.print_len += len(UpperCamelCase__ )
# Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
# which may change with the subsequent token -- there are probably smarter ways to do this!)
else:
lowercase_ = text[self.print_len : text.rfind(""" """ ) + 1]
self.print_len += len(UpperCamelCase__ )
self.on_finalized_text(UpperCamelCase__ )
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
if len(self.token_cache ) > 0:
lowercase_ = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
lowercase_ = text[self.print_len :]
lowercase_ = []
lowercase_ = 0
else:
lowercase_ = """"""
lowercase_ = True
self.on_finalized_text(UpperCamelCase__ , stream_end=UpperCamelCase__ )
def UpperCAmelCase__ ( self : Optional[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : str = False ):
'''simple docstring'''
print(UpperCamelCase__ , flush=UpperCamelCase__ , end="""""" if not stream_end else None )
def UpperCAmelCase__ ( self : Dict , UpperCamelCase__ : Union[str, Any] ):
'''simple docstring'''
if (
(cp >= 0X4_e_0_0 and cp <= 0X9_f_f_f)
or (cp >= 0X3_4_0_0 and cp <= 0X4_d_b_f) #
or (cp >= 0X2_0_0_0_0 and cp <= 0X2_a_6_d_f) #
or (cp >= 0X2_a_7_0_0 and cp <= 0X2_b_7_3_f) #
or (cp >= 0X2_b_7_4_0 and cp <= 0X2_b_8_1_f) #
or (cp >= 0X2_b_8_2_0 and cp <= 0X2_c_e_a_f) #
or (cp >= 0Xf_9_0_0 and cp <= 0Xf_a_f_f)
or (cp >= 0X2_f_8_0_0 and cp <= 0X2_f_a_1_f) #
): #
return True
return False
class UpperCamelCase__ ( __A ):
def __init__( self : Tuple , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[int] = False , UpperCamelCase__ : Union[str, Any] = None , **UpperCamelCase__ : Union[str, Any] ):
'''simple docstring'''
super().__init__(UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ )
lowercase_ = Queue()
lowercase_ = None
lowercase_ = timeout
def UpperCAmelCase__ ( self : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Any = False ):
'''simple docstring'''
self.text_queue.put(UpperCamelCase__ , timeout=self.timeout )
if stream_end:
self.text_queue.put(self.stop_signal , timeout=self.timeout )
def __iter__( self : List[Any] ):
'''simple docstring'''
return self
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ = self.text_queue.get(timeout=self.timeout )
if value == self.stop_signal:
raise StopIteration()
else:
return value
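if __name__ == "__main__":
    # Minimal standalone sketch of the queue-plus-stop-signal pattern the iterator
    # streamer above relies on (no tokenizer or model involved; Queue is imported
    # at the top of this module).
    q = Queue()
    stop_signal = None
    for chunk in ["Hello", " ", "world", stop_signal]:
        q.put(chunk)
    pieces = []
    while (item := q.get()) is not stop_signal:
        pieces.append(item)
    print("".join(pieces))  # Hello world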
| 704
|
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
a = TypeVar('T')
class UpperCamelCase__ ( Generic[T] ):
__SCREAMING_SNAKE_CASE : deque[T] # Cache store of keys
__SCREAMING_SNAKE_CASE : set[T] # References of the keys in cache
__SCREAMING_SNAKE_CASE : int = 10 # Maximum capacity of cache
def __init__( self : str , UpperCamelCase__ : int ):
'''simple docstring'''
lowercase_ = deque()
lowercase_ = set()
if not n:
lowercase_ = sys.maxsize
elif n < 0:
raise ValueError("""n should be an integer greater than 0.""" )
else:
lowercase_ = n
def UpperCAmelCase__ ( self : Optional[int] , UpperCamelCase__ : T ):
'''simple docstring'''
if x not in self.key_reference:
if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
lowercase_ = self.dq_store.pop()
self.key_reference.remove(UpperCamelCase__ )
else:
self.dq_store.remove(UpperCamelCase__ )
self.dq_store.appendleft(UpperCamelCase__ )
self.key_reference.add(UpperCamelCase__ )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
for k in self.dq_store:
print(UpperCamelCase__ )
def __repr__( self : Optional[Any] ):
'''simple docstring'''
return F'''LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}'''
if __name__ == "__main__":
import doctest
doctest.testmod()
a = LRUCache(4)
lru_cache.refer('A')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('A')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 650
| 0
|
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ ):
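    # Remap the original LDM VAE state-dict keys onto the diffusers AutoencoderKL layout.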
lowercase_ = checkpoint
lowercase_ = {}
lowercase_ = vae_state_dict["""encoder.conv_in.weight"""]
lowercase_ = vae_state_dict["""encoder.conv_in.bias"""]
lowercase_ = vae_state_dict["""encoder.conv_out.weight"""]
lowercase_ = vae_state_dict["""encoder.conv_out.bias"""]
lowercase_ = vae_state_dict["""encoder.norm_out.weight"""]
lowercase_ = vae_state_dict["""encoder.norm_out.bias"""]
lowercase_ = vae_state_dict["""decoder.conv_in.weight"""]
lowercase_ = vae_state_dict["""decoder.conv_in.bias"""]
lowercase_ = vae_state_dict["""decoder.conv_out.weight"""]
lowercase_ = vae_state_dict["""decoder.conv_out.bias"""]
lowercase_ = vae_state_dict["""decoder.norm_out.weight"""]
lowercase_ = vae_state_dict["""decoder.norm_out.bias"""]
lowercase_ = vae_state_dict["""quant_conv.weight"""]
lowercase_ = vae_state_dict["""quant_conv.bias"""]
lowercase_ = vae_state_dict["""post_quant_conv.weight"""]
lowercase_ = vae_state_dict["""post_quant_conv.bias"""]
# Retrieves the keys for the encoder down blocks only
lowercase_ = len({""".""".join(layer.split(""".""" )[:3] ) for layer in vae_state_dict if """encoder.down""" in layer} )
lowercase_ = {
layer_id: [key for key in vae_state_dict if F'''down.{layer_id}''' in key] for layer_id in range(__snake_case )
}
# Retrieves the keys for the decoder up blocks only
lowercase_ = len({""".""".join(layer.split(""".""" )[:3] ) for layer in vae_state_dict if """decoder.up""" in layer} )
lowercase_ = {
layer_id: [key for key in vae_state_dict if F'''up.{layer_id}''' in key] for layer_id in range(__snake_case )
}
for i in range(__snake_case ):
lowercase_ = [key for key in down_blocks[i] if F'''down.{i}''' in key and F'''down.{i}.downsample''' not in key]
if F'''encoder.down.{i}.downsample.conv.weight''' in vae_state_dict:
lowercase_ = vae_state_dict.pop(
F'''encoder.down.{i}.downsample.conv.weight''' )
lowercase_ = vae_state_dict.pop(
F'''encoder.down.{i}.downsample.conv.bias''' )
lowercase_ = renew_vae_resnet_paths(__snake_case )
lowercase_ = {"""old""": F'''down.{i}.block''', """new""": F'''down_blocks.{i}.resnets'''}
assign_to_checkpoint(__snake_case , __snake_case , __snake_case , additional_replacements=[meta_path] , config=__snake_case )
lowercase_ = [key for key in vae_state_dict if """encoder.mid.block""" in key]
lowercase_ = 2
for i in range(1 , num_mid_res_blocks + 1 ):
lowercase_ = [key for key in mid_resnets if F'''encoder.mid.block_{i}''' in key]
lowercase_ = renew_vae_resnet_paths(__snake_case )
lowercase_ = {"""old""": F'''mid.block_{i}''', """new""": F'''mid_block.resnets.{i - 1}'''}
assign_to_checkpoint(__snake_case , __snake_case , __snake_case , additional_replacements=[meta_path] , config=__snake_case )
lowercase_ = [key for key in vae_state_dict if """encoder.mid.attn""" in key]
lowercase_ = renew_vae_attention_paths(__snake_case )
lowercase_ = {"""old""": """mid.attn_1""", """new""": """mid_block.attentions.0"""}
assign_to_checkpoint(__snake_case , __snake_case , __snake_case , additional_replacements=[meta_path] , config=__snake_case )
conv_attn_to_linear(__snake_case )
for i in range(__snake_case ):
lowercase_ = num_up_blocks - 1 - i
lowercase_ = [
key for key in up_blocks[block_id] if F'''up.{block_id}''' in key and F'''up.{block_id}.upsample''' not in key
]
if F'''decoder.up.{block_id}.upsample.conv.weight''' in vae_state_dict:
lowercase_ = vae_state_dict[
F'''decoder.up.{block_id}.upsample.conv.weight'''
]
lowercase_ = vae_state_dict[
F'''decoder.up.{block_id}.upsample.conv.bias'''
]
lowercase_ = renew_vae_resnet_paths(__snake_case )
lowercase_ = {"""old""": F'''up.{block_id}.block''', """new""": F'''up_blocks.{i}.resnets'''}
assign_to_checkpoint(__snake_case , __snake_case , __snake_case , additional_replacements=[meta_path] , config=__snake_case )
lowercase_ = [key for key in vae_state_dict if """decoder.mid.block""" in key]
lowercase_ = 2
for i in range(1 , num_mid_res_blocks + 1 ):
lowercase_ = [key for key in mid_resnets if F'''decoder.mid.block_{i}''' in key]
lowercase_ = renew_vae_resnet_paths(__snake_case )
lowercase_ = {"""old""": F'''mid.block_{i}''', """new""": F'''mid_block.resnets.{i - 1}'''}
assign_to_checkpoint(__snake_case , __snake_case , __snake_case , additional_replacements=[meta_path] , config=__snake_case )
lowercase_ = [key for key in vae_state_dict if """decoder.mid.attn""" in key]
lowercase_ = renew_vae_attention_paths(__snake_case )
lowercase_ = {"""old""": """mid.attn_1""", """new""": """mid_block.attentions.0"""}
assign_to_checkpoint(__snake_case , __snake_case , __snake_case , additional_replacements=[meta_path] , config=__snake_case )
conv_attn_to_linear(__snake_case )
return new_checkpoint
def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , ):
# Only supports V1
lowercase_ = requests.get(
""" https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml""" )
lowercase_ = io.BytesIO(r.content )
lowercase_ = OmegaConf.load(__snake_case )
lowercase_ = 5_1_2
lowercase_ = """cuda""" if torch.cuda.is_available() else """cpu"""
if checkpoint_path.endswith("""safetensors""" ):
from safetensors import safe_open
lowercase_ = {}
with safe_open(__snake_case , framework="""pt""" , device="""cpu""" ) as f:
for key in f.keys():
lowercase_ = f.get_tensor(__snake_case )
else:
lowercase_ = torch.load(__snake_case , map_location=__snake_case )["""state_dict"""]
# Convert the VAE model.
lowercase_ = create_vae_diffusers_config(__snake_case , image_size=__snake_case )
lowercase_ = custom_convert_ldm_vae_checkpoint(__snake_case , __snake_case )
lowercase_ = AutoencoderKL(**__snake_case )
vae.load_state_dict(__snake_case )
vae.save_pretrained(__snake_case )
if __name__ == "__main__":
a = argparse.ArgumentParser()
parser.add_argument('--vae_pt_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.')
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.')
a = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
| 705
|
def UpperCAmelCase_ ( UpperCAmelCase__ ):
return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
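    # Example: split on whitespace, reverse the word order, and re-join.
    assert UpperCAmelCase_("""Hello World""" ) == """World Hello"""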
| 650
| 0
|
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class UpperCamelCase__ ( __a ):
__SCREAMING_SNAKE_CASE : "DiagonalGaussianDistribution"
class UpperCamelCase__ ( __a , __a ):
__SCREAMING_SNAKE_CASE : Tuple = True
@register_to_config
def __init__( self : Union[str, Any] , UpperCamelCase__ : int = 3 , UpperCamelCase__ : int = 3 , UpperCamelCase__ : Tuple[str] = ("DownEncoderBlock2D",) , UpperCamelCase__ : Tuple[str] = ("UpDecoderBlock2D",) , UpperCamelCase__ : Tuple[int] = (64,) , UpperCamelCase__ : int = 1 , UpperCamelCase__ : str = "silu" , UpperCamelCase__ : int = 4 , UpperCamelCase__ : int = 32 , UpperCamelCase__ : int = 32 , UpperCamelCase__ : float = 0.18_215 , ):
'''simple docstring'''
super().__init__()
# pass init params to Encoder
lowercase_ = Encoder(
in_channels=snake_case__ , out_channels=snake_case__ , down_block_types=snake_case__ , block_out_channels=snake_case__ , layers_per_block=snake_case__ , act_fn=snake_case__ , norm_num_groups=snake_case__ , double_z=snake_case__ , )
# pass init params to Decoder
lowercase_ = Decoder(
in_channels=snake_case__ , out_channels=snake_case__ , up_block_types=snake_case__ , block_out_channels=snake_case__ , layers_per_block=snake_case__ , norm_num_groups=snake_case__ , act_fn=snake_case__ , )
lowercase_ = nn.Convad(2 * latent_channels , 2 * latent_channels , 1 )
lowercase_ = nn.Convad(snake_case__ , snake_case__ , 1 )
lowercase_ = False
lowercase_ = False
# only relevant if vae tiling is enabled
lowercase_ = self.config.sample_size
lowercase_ = (
self.config.sample_size[0]
if isinstance(self.config.sample_size , (list, tuple) )
else self.config.sample_size
)
lowercase_ = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
lowercase_ = 0.25
def UpperCAmelCase__ ( self : int , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int]=False ):
'''simple docstring'''
if isinstance(snake_case__ , (Encoder, Decoder) ):
lowercase_ = value
def UpperCAmelCase__ ( self : Any , UpperCamelCase__ : bool = True ):
'''simple docstring'''
lowercase_ = use_tiling
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
self.enable_tiling(snake_case__ )
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ = True
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
lowercase_ = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
lowercase_ = {}
def fn_recursive_add_processors(UpperCamelCase__ : str , UpperCamelCase__ : torch.nn.Module , UpperCamelCase__ : Dict[str, AttentionProcessor] ):
if hasattr(snake_case__ , """set_processor""" ):
lowercase_ = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F'''{name}.{sub_name}''' , snake_case__ , snake_case__ )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(snake_case__ , snake_case__ , snake_case__ )
return processors
def UpperCAmelCase__ ( self : List[Any] , UpperCamelCase__ : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ):
'''simple docstring'''
lowercase_ = len(self.attn_processors.keys() )
if isinstance(snake_case__ , snake_case__ ) and len(snake_case__ ) != count:
raise ValueError(
F'''A dict of processors was passed, but the number of processors {len(snake_case__ )} does not match the'''
F''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''' )
def fn_recursive_attn_processor(UpperCamelCase__ : str , UpperCamelCase__ : torch.nn.Module , UpperCamelCase__ : List[Any] ):
if hasattr(snake_case__ , """set_processor""" ):
if not isinstance(snake_case__ , snake_case__ ):
module.set_processor(snake_case__ )
else:
module.set_processor(processor.pop(F'''{name}.processor''' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F'''{name}.{sub_name}''' , snake_case__ , snake_case__ )
for name, module in self.named_children():
fn_recursive_attn_processor(snake_case__ , snake_case__ , snake_case__ )
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
def UpperCAmelCase__ ( self : Union[str, Any] , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : bool = True ):
'''simple docstring'''
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(snake_case__ , return_dict=snake_case__ )
if self.use_slicing and x.shape[0] > 1:
lowercase_ = [self.encoder(snake_case__ ) for x_slice in x.split(1 )]
lowercase_ = torch.cat(snake_case__ )
else:
lowercase_ = self.encoder(snake_case__ )
lowercase_ = self.quant_conv(snake_case__ )
lowercase_ = DiagonalGaussianDistribution(snake_case__ )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=snake_case__ )
def UpperCAmelCase__ ( self : Optional[int] , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : bool = True ):
'''simple docstring'''
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(snake_case__ , return_dict=snake_case__ )
lowercase_ = self.post_quant_conv(snake_case__ )
lowercase_ = self.decoder(snake_case__ )
if not return_dict:
return (dec,)
return DecoderOutput(sample=snake_case__ )
@apply_forward_hook
def UpperCAmelCase__ ( self : Any , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : bool = True ):
'''simple docstring'''
if self.use_slicing and z.shape[0] > 1:
lowercase_ = [self._decode(snake_case__ ).sample for z_slice in z.split(1 )]
lowercase_ = torch.cat(snake_case__ )
else:
lowercase_ = self._decode(snake_case__ ).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=snake_case__ )
def UpperCAmelCase__ ( self : Dict , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : Any ):
'''simple docstring'''
lowercase_ = min(a.shape[2] , b.shape[2] , snake_case__ )
for y in range(snake_case__ ):
lowercase_ = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
def UpperCAmelCase__ ( self : int , UpperCamelCase__ : Any , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
lowercase_ = min(a.shape[3] , b.shape[3] , snake_case__ )
for x in range(snake_case__ ):
lowercase_ = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
return b
def UpperCAmelCase__ ( self : Optional[int] , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : bool = True ):
'''simple docstring'''
lowercase_ = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
lowercase_ = int(self.tile_latent_min_size * self.tile_overlap_factor )
lowercase_ = self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
lowercase_ = []
for i in range(0 , x.shape[2] , snake_case__ ):
lowercase_ = []
for j in range(0 , x.shape[3] , snake_case__ ):
lowercase_ = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
lowercase_ = self.encoder(snake_case__ )
lowercase_ = self.quant_conv(snake_case__ )
row.append(snake_case__ )
rows.append(snake_case__ )
lowercase_ = []
for i, row in enumerate(snake_case__ ):
lowercase_ = []
for j, tile in enumerate(snake_case__ ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
lowercase_ = self.blend_v(rows[i - 1][j] , snake_case__ , snake_case__ )
if j > 0:
lowercase_ = self.blend_h(row[j - 1] , snake_case__ , snake_case__ )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(snake_case__ , dim=3 ) )
lowercase_ = torch.cat(snake_case__ , dim=2 )
lowercase_ = DiagonalGaussianDistribution(snake_case__ )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=snake_case__ )
def UpperCAmelCase__ ( self : Any , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : bool = True ):
'''simple docstring'''
lowercase_ = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
lowercase_ = int(self.tile_sample_min_size * self.tile_overlap_factor )
lowercase_ = self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
lowercase_ = []
for i in range(0 , z.shape[2] , snake_case__ ):
lowercase_ = []
for j in range(0 , z.shape[3] , snake_case__ ):
lowercase_ = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
lowercase_ = self.post_quant_conv(snake_case__ )
lowercase_ = self.decoder(snake_case__ )
row.append(snake_case__ )
rows.append(snake_case__ )
lowercase_ = []
for i, row in enumerate(snake_case__ ):
lowercase_ = []
for j, tile in enumerate(snake_case__ ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
lowercase_ = self.blend_v(rows[i - 1][j] , snake_case__ , snake_case__ )
if j > 0:
lowercase_ = self.blend_h(row[j - 1] , snake_case__ , snake_case__ )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(snake_case__ , dim=3 ) )
lowercase_ = torch.cat(snake_case__ , dim=2 )
if not return_dict:
return (dec,)
return DecoderOutput(sample=snake_case__ )
def UpperCAmelCase__ ( self : int , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[torch.Generator] = None , ):
'''simple docstring'''
lowercase_ = sample
lowercase_ = self.encode(snake_case__ ).latent_dist
if sample_posterior:
lowercase_ = posterior.sample(generator=snake_case__ )
else:
lowercase_ = posterior.mode()
lowercase_ = self.decode(snake_case__ ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=snake_case__ )
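if __name__ == "__main__":
    # Minimal sketch of the vertical blend above: linearly cross-fade the last
    # blend_extent rows of tile a into the first rows of tile b (sizes are illustrative).
    a = torch.zeros(1, 1, 4, 4)
    b = torch.ones(1, 1, 4, 4)
    blend_extent = 2
    for y in range(blend_extent):
        b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
    print(b[0, 0, :, 0])  # tensor([0.0000, 0.5000, 1.0000, 1.0000])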
| 706
|
def UpperCAmelCase_ ( UpperCAmelCase__=2_8_1_2_3 ):
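    # Project Euler 23: sum every n <= limit that cannot be written as the sum of two
    # abundant numbers. sum_divs is a sieve of proper-divisor sums; n is abundant when
    # that sum exceeds n.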
lowercase_ = [1] * (limit + 1)
for i in range(2 , int(limit**0.5 ) + 1 ):
sum_divs[i * i] += i
for k in range(i + 1 , limit // i + 1 ):
sum_divs[k * i] += k + i
lowercase_ = set()
lowercase_ = 0
for n in range(1 , limit + 1 ):
if sum_divs[n] > n:
abundants.add(UpperCAmelCase__ )
if not any((n - a in abundants) for a in abundants ):
res += n
return res
if __name__ == "__main__":
print(solution())
| 650
| 0
|
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
a = logging.getLogger(__name__)
a = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
a = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class UpperCamelCase__ :
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default=__lowerCamelCase , metadata={
'help': (
'The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.'
)
} , )
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default=__lowerCamelCase , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(__lowerCamelCase )} , )
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default=__lowerCamelCase , metadata={
'help': (
'Override some existing default config settings when a model is trained from scratch. Example: '
'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'
)
} , )
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default=__lowerCamelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default=__lowerCamelCase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default=__lowerCamelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
__SCREAMING_SNAKE_CASE : bool = field(
default=__lowerCamelCase , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
__SCREAMING_SNAKE_CASE : str = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
__SCREAMING_SNAKE_CASE : bool = field(
default=__lowerCamelCase , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
"""--config_overrides can't be used in combination with --config_name or --model_name_or_path""" )
@dataclass
class UpperCamelCase__ :
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default=__lowerCamelCase , metadata={'help': 'The name of the dataset to use (via the datasets library).'} )
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default=__lowerCamelCase , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
__SCREAMING_SNAKE_CASE : Optional[str] = field(default=__lowerCamelCase , metadata={'help': 'The input training data file (a text file).'} )
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default=__lowerCamelCase , metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'} , )
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default=__lowerCamelCase , metadata={'help': 'An optional input train ref data file for whole word masking in Chinese.'} , )
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default=__lowerCamelCase , metadata={'help': 'An optional input validation ref data file for whole word masking in Chinese.'} , )
__SCREAMING_SNAKE_CASE : bool = field(
default=__lowerCamelCase , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
__SCREAMING_SNAKE_CASE : Optional[int] = field(
default=5 , metadata={
'help': 'The percentage of the train set used as validation set in case there\'s no validation split'
} , )
__SCREAMING_SNAKE_CASE : Optional[int] = field(
default=__lowerCamelCase , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated. Default to the max input length of the model.'
)
} , )
__SCREAMING_SNAKE_CASE : Optional[int] = field(
default=__lowerCamelCase , metadata={'help': 'The number of processes to use for the preprocessing.'} , )
__SCREAMING_SNAKE_CASE : float = field(
default=0.1_5 , metadata={'help': 'Ratio of tokens to mask for masked language modeling loss'} )
__SCREAMING_SNAKE_CASE : bool = field(
default=__lowerCamelCase , metadata={
'help': (
'Whether to pad all samples to `max_seq_length`. '
'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
)
} , )
    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references(dataset, ref_file):
    with open(ref_file, "r", encoding="utf-8") as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)
    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)
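# Illustrative usage (hypothetical file name): if "refs.json" holds one JSON-encoded list
# of whole-word boundary indices per example, e.g. [2, 5, 7], then
#   tokenized_datasets["train"] = add_chinese_references(tokenized_datasets["train"], "refs.json")
# attaches a "chinese_ref" column that DataCollatorForWholeWordMask uses to mask whole words.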
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , lowerCAmelCase_ )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
        if "validation" not in datasets.keys():
            datasets["validation"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[:{data_args.validation_split_percentage}%]",
            )
            datasets["train"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[{data_args.validation_split_percentage}%:]",
            )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        if extension == "txt":
            extension = "text"
        datasets = load_dataset(extension, data_files=data_files)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
    if model_args.config_overrides is not None:
        logger.info(f"Overriding config: {model_args.config_overrides}")
        config.update_from_string(model_args.config_overrides)
        logger.info(f"New config: {config}")
    tokenizer_kwargs = {
        "cache_dir": model_args.cache_dir,
        "use_fast": model_args.use_fast_tokenizer,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported by this script. "
            "You can do it from another script, save it, and load it from here, using --tokenizer_name."
        )
    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedLM.from_config(config)
    model.resize_token_embeddings(len(tokenizer))
# Preprocessing the datasets.
# First we tokenize all the texts.
    if training_args.do_train:
        column_names = datasets["train"].column_names
    else:
        column_names = datasets["validation"].column_names
    text_column_name = "text" if "text" in column_names else column_names[0]
    padding = "max_length" if data_args.pad_to_max_length else False
    def tokenize_function(examples):
        # Remove empty lines
        examples["text"] = [line for line in examples["text"] if len(line) > 0 and not line.isspace()]
        return tokenizer(examples["text"], padding=padding, truncation=True, max_length=data_args.max_seq_length)
    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        remove_columns=[text_column_name],
        load_from_cache_file=not data_args.overwrite_cache,
    )
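    # Illustrative example: a batch {"text": ["Hello world", "  "]} first drops the blank
    # line, then is tokenized; with --pad_to_max_length the output is padded to
    # max_seq_length, otherwise padding is deferred to the data collator at batching time.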
# Add the chinese references if provided
    if data_args.train_ref_file is not None:
        tokenized_datasets["train"] = add_chinese_references(tokenized_datasets["train"], data_args.train_ref_file)
    if data_args.validation_ref_file is not None:
        tokenized_datasets["validation"] = add_chinese_references(
            tokenized_datasets["validation"], data_args.validation_ref_file
        )
    # If we have ref files, need to avoid it removed by trainer
    has_ref = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        training_args.remove_unused_columns = False
    # Data collator
    # This one will take care of randomly masking the tokens.
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)
    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"] if training_args.do_train else None,
        eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )
# Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        output_train_file = os.path.join(training_args.output_dir, "train_results.txt")
        if trainer.is_world_process_zero():
            with open(output_train_file, "w") as writer:
                logger.info("***** Train results *****")
                for key, value in sorted(train_result.metrics.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")
            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        eval_output = trainer.evaluate()
        # Perplexity is the exponential of the mean eval loss, e.g. eval_loss = 2.0 gives ~7.39.
        perplexity = math.exp(eval_output["eval_loss"])
        results["perplexity"] = perplexity
        output_eval_file = os.path.join(training_args.output_dir, "eval_results_mlm_wwm.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in sorted(results.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")
    return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
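# `_mp_fn` is the entry point used by examples/pytorch/xla_spawn.py: each TPU process
# receives its ordinal as `index` and simply re-runs `main()`, which re-parses sys.argv.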
if __name__ == "__main__":
main()
| 707
|
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return OpenLlamaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, use_stable_embedding=True,
        )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True
        model = OpenLlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenLlamaModel,
            "text-classification": OpenLlamaForSequenceClassification,
            "text-generation": OpenLlamaForCausalLM,
            "zero-shot": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_open_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_open_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_open_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    @unittest.skip("Open-Llama buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)
        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state
        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
| 650
| 0
|
INSTALL_CONTENT = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 708
|
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False
logger = logging.get_logger(__name__)
DEFAULT_FONT_PATH = "ybelkada/fonts"
def _check_torch_version():
    if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
        raise ImportError(
            f"You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use "
            "Pix2StructImageProcessor. Please upgrade torch."
        )
def torch_extract_patches(image_tensor, patch_height, patch_width):
    requires_backends(torch_extract_patches, ["torch"])
    _check_torch_version()
    image_tensor = image_tensor.unsqueeze(0)
    patches = torch.nn.functional.unfold(image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width))
    patches = patches.reshape(image_tensor.size(0), image_tensor.size(1), patch_height, patch_width, -1)
    patches = patches.permute(0, 4, 2, 3, 1).reshape(
        image_tensor.size(2) // patch_height,
        image_tensor.size(3) // patch_width,
        image_tensor.size(1) * patch_height * patch_width,
    )
    return patches.unsqueeze(0)
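# Shape sketch (illustrative numbers): for an image tensor of shape (3, 32, 64) and 16x16
# patches, unfold yields 2 x 4 patches of depth 3 * 16 * 16 = 768, so the returned tensor
# has shape (1, 2, 4, 768), i.e. (1, rows, columns, channels * patch_height * patch_width).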
def render_text(text, text_size=36, text_color="black", background_color="white", left_padding=5, right_padding=5, top_padding=5, bottom_padding=5, font_bytes=None, font_path=None):
    requires_backends(render_text, "vision")
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=80)
    lines = wrapper.wrap(text=text)
    wrapped_text = "\n".join(lines)
    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes)
    elif font_path is not None:
        font = font_path
    else:
        font = hf_hub_download(DEFAULT_FONT_PATH, "Arial.TTF")
    font = ImageFont.truetype(font, encoding="UTF-8", size=text_size)
    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new("RGB", (1, 1), background_color))
    _, _, text_width, text_height = temp_draw.textbbox((0, 0), wrapped_text, font)
    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new("RGB", (image_width, image_height), background_color)
    draw = ImageDraw.Draw(image)
    draw.text(xy=(left_padding, top_padding), text=wrapped_text, fill=text_color, font=font)
    return image
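# Minimal usage sketch (the fallback font requires network access to the hub):
#   header = render_text("What does the chart show?", text_size=36)
# returns a white RGB PIL image containing the black, 80-column-wrapped text with
# 5px of padding on each side.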
def render_header(image, header, **kwargs):
    requires_backends(render_header, "vision")
    # Convert to PIL image if necessary
    image = to_pil_image(image)
    header_image = render_text(header, **kwargs)
    new_width = max(header_image.width, image.width)
    new_height = int(image.height * (new_width / image.width))
    new_header_height = int(header_image.height * (new_width / header_image.width))
    new_image = Image.new("RGB", (new_width, new_height + new_header_height), "white")
    new_image.paste(header_image.resize((new_width, new_header_height)), (0, 0))
    new_image.paste(image.resize((new_width, new_height)), (0, new_header_height))
    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image)
    if infer_channel_dimension_format(new_image) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image, ChannelDimension.LAST)
    return new_image
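# Illustrative call: render_header(np.zeros((64, 64, 3), dtype=np.uint8), "a question")
# renders the text banner, rescales both parts to a common width, and stacks the banner
# on top of the original image.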
class Pix2StructImageProcessor(BaseImageProcessor):
    model_input_names = ["flattened_patches"]
    def __init__(self, do_convert_rgb: bool = True, do_normalize: bool = True, patch_size: Dict[str, int] = None, max_patches: int = 2048, is_vqa: bool = False, **kwargs):
        super().__init__(**kwargs)
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = max_patches
        self.is_vqa = is_vqa
    def extract_flattened_patches(self, image: np.ndarray, max_patches: int, patch_size: dict, **kwargs) -> np.ndarray:
        requires_backends(self.extract_flattened_patches, "torch")
        _check_torch_version()
        # convert to torch
        image = to_channel_dimension_format(image, ChannelDimension.FIRST)
        image = torch.from_numpy(image)
        patch_height, patch_width = patch_size["height"], patch_size["width"]
        image_height, image_width = get_image_size(image)
        # maximize scale s.t. the number of resulting patch rows * columns stays <= max_patches
        scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
        num_feasible_rows = max(min(math.floor(scale * image_height / patch_height), max_patches), 1)
        num_feasible_cols = max(min(math.floor(scale * image_width / patch_width), max_patches), 1)
        resized_height = max(num_feasible_rows * patch_height, 1)
        resized_width = max(num_feasible_cols * patch_width, 1)
        image = torch.nn.functional.interpolate(
            image.unsqueeze(0), size=(resized_height, resized_width), mode="bilinear", align_corners=False, antialias=True,
        ).squeeze(0)
        # [1, rows, columns, patch_height * patch_width * image_channels]
        patches = torch_extract_patches(image, patch_height, patch_width)
        patches_shape = patches.shape
        rows = patches_shape[1]
        columns = patches_shape[2]
        depth = patches_shape[3]
        # [rows * columns, patch_height * patch_width * image_channels]
        patches = patches.reshape([rows * columns, depth])
        # [rows * columns, 1]
        row_ids = torch.arange(rows).reshape([rows, 1]).repeat(1, columns).reshape([rows * columns, 1])
        col_ids = torch.arange(columns).reshape([1, columns]).repeat(rows, 1).reshape([rows * columns, 1])
        # Offset by 1 so the ids do not contain zeros, which represent padding.
        row_ids += 1
        col_ids += 1
        # Prepare additional patch features.
        # [rows * columns, 1]
        row_ids = row_ids.to(torch.float32)
        col_ids = col_ids.to(torch.float32)
        # [rows * columns, 2 + patch_height * patch_width * image_channels]
        result = torch.cat([row_ids, col_ids, patches], -1)
        # [max_patches, 2 + patch_height * patch_width * image_channels]
        result = torch.nn.functional.pad(result, [0, 0, 0, max_patches - (rows * columns)]).float()
        result = to_numpy_array(result)
        return result
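    # Note: when rows * columns < max_patches, the tail of `result` is zero padding; the
    # `preprocess` method below derives the attention mask from exactly those all-zero rows.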
    def normalize(self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        if image.dtype == np.uint8:
            image = image.astype(np.float32)
        # take mean across the whole `image`
        mean = np.mean(image)
        std = np.std(image)
        adjusted_stddev = max(std, 1.0 / math.sqrt(np.prod(image.shape)))
        return normalize(image, mean=mean, std=adjusted_stddev, **kwargs)
    def preprocess(self, images: ImageInput, header_text: Optional[str] = None, do_convert_rgb: bool = None, do_normalize: Optional[bool] = None, max_patches: Optional[int] = None, patch_size: Optional[Dict[str, int]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> ImageInput:
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        patch_size = patch_size if patch_size is not None else self.patch_size
        max_patches = max_patches if max_patches is not None else self.max_patches
        is_vqa = self.is_vqa
        if kwargs.get("data_format", None) is not None:
            raise ValueError("data_format is not an accepted input as the outputs are ")
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if is_vqa:
            if header_text is None:
                raise ValueError("A header text must be provided for VQA models.")
            font_bytes = kwargs.pop("font_bytes", None)
            font_path = kwargs.pop("font_path", None)
            if isinstance(header_text, str):
                header_text = [header_text] * len(images)
            images = [
                render_header(image, header_text[i], font_bytes=font_bytes, font_path=font_path)
                for i, image in enumerate(images)
            ]
        if do_normalize:
            images = [self.normalize(image=image) for image in images]
        # convert to torch tensor and permute
        images = [
            self.extract_flattened_patches(image=image, max_patches=max_patches, patch_size=patch_size)
            for image in images
        ]
        # create attention mask in numpy
        attention_masks = [(image.sum(axis=-1) != 0).astype(np.float32) for image in images]
        encoded_outputs = BatchFeature(
            data={"flattened_patches": images, "attention_mask": attention_masks}, tensor_type=return_tensors
        )
        return encoded_outputs
| 650
| 0
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_informer': [
'INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_informer"] = [
'INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'InformerForPrediction',
'InformerModel',
'InformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 709
|
import cv2
import numpy as np
class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        # k is an empirically determined constant in [0.04, 0.06]
        # window_size is the neighbourhood considered around each pixel
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")
    def __str__(self):
        return str(self.k)
    def detect(self, img_path: str):
        # Returns the image with corners marked and the list of corner positions
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list
if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
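# Note: the r > 0.5 threshold on the corner response is ad hoc; Harris implementations
# commonly threshold relative to the maximum response (e.g. r > 0.01 * r.max()) instead.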
| 650
| 0
|
from typing import List
import numpy as np
def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int:
    # Having lists of different sizes makes sharding ambiguous, raise an error in this case
    lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)}
    if len(set(lists_lengths.values())) > 1:
        raise RuntimeError(
            (
                "Sharding is ambiguous for this dataset: "
                + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
                + "\n".join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items())
                + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
                + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
            )
        )
    max_length = max(lists_lengths.values(), default=0)
    return max(1, max_length)
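# Example: _number_of_shards_in_gen_kwargs({"files": ["a.txt", "b.txt", "c.txt"], "seed": 42})
# returns 3: only lists count as shardable data sources, scalar values are ignored.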
def _distribute_shards(num_shards: int, max_num_jobs: int) -> list:
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start, start + num_shards_to_add)
        shards_indices_per_group.append(shard_indices)
    return shards_indices_per_group
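# Example: _distribute_shards(num_shards=5, max_num_jobs=3) == [range(0, 2), range(2, 4), range(4, 5)];
# the first num_shards % max_num_jobs groups each receive one extra shard.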
def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> list:
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
    if num_shards == 1:
        return [dict(gen_kwargs)]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value, list)
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group))
        ]
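# Example: _split_gen_kwargs({"files": ["a", "b", "c", "d"]}, max_num_jobs=2)
# == [{"files": ["a", "b"]}, {"files": ["c", "d"]}]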
def _merge_gen_kwargs(gen_kwargs_list: list) -> dict:
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key], list)
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }
def _shuffle_gen_kwargs(rng: np.random.Generator, gen_kwargs: dict) -> dict:
    list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size))
        rng.shuffle(indices_per_size[size])
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs)
    for key, value in shuffled_kwargs.items():
        if isinstance(value, list):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
    return shuffled_kwargs
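# Note: lists of the same length share one permutation, so entangled sources such as
# {"files": [...], "metadata": [...]} stay aligned after shuffling.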
| 710
|
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('9.1.0'):
a = {
'linear': PIL.Image.Resampling.BILINEAR,
'bilinear': PIL.Image.Resampling.BILINEAR,
'bicubic': PIL.Image.Resampling.BICUBIC,
'lanczos': PIL.Image.Resampling.LANCZOS,
'nearest': PIL.Image.Resampling.NEAREST,
}
else:
a = {
'linear': PIL.Image.LINEAR,
'bilinear': PIL.Image.BILINEAR,
'bicubic': PIL.Image.BICUBIC,
'lanczos': PIL.Image.LANCZOS,
'nearest': PIL.Image.NEAREST,
}
def pt_to_pil(images):
    """Convert a torch image to a PIL image."""
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images
def numpy_to_pil(images):
    """Convert a numpy image or a batch of images to a PIL image."""
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
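# Usage sketch: pt_to_pil expects a batched tensor scaled to [-1, 1] with shape (N, C, H, W),
# e.g. pt_to_pil(torch.randn(1, 3, 64, 64).clamp(-1, 1)) returns a list with one PIL image.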
| 650
| 0
|
def infix_2_postfix(infix):
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7
    # Print table header for output
    print(
        "Symbol".center(8), "Stack".center(print_width), "Postfix".center(print_width), sep=" | ",
    )
    print("-" * (print_width * 3 + 7))
    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack
        print(
            x.center(8), ("".join(stack)).ljust(print_width), ("".join(post_fix)).ljust(print_width), sep=" | ",
        )  # Output in tabular format
    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8), ("".join(stack)).ljust(print_width), ("".join(post_fix)).ljust(print_width), sep=" | ",
        )  # Output in tabular format
    return "".join(post_fix)  # return Postfix as str
def infix_2_prefix(infix):
    infix = list(infix[::-1])  # reverse the infix equation
    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ")"  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = "("  # change ")" to "("
    return (infix_2_postfix("".join(infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix
if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
    print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
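# Worked example: for the input a+b*c the script prints +a*bc as the prefix form
# (reverse the input, convert to postfix, then reverse the result).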
| 711
|
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UnCLIPSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UnCLIPScheduler,)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1_000,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }
        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_variance_type(self):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance)
    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)
    def test_clip_sample_range(self):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range)
    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_time_indices(self):
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue
                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)
    def test_variance_fixed_small_log(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log")
        scheduler = scheduler_class(**scheduler_config)
        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0_549_625)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9_994_987)) < 1e-5
    def test_variance_learned_range(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="learned_range")
        scheduler = scheduler_class(**scheduler_config)
        predicted_variance = 0.5
        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1_712_790 < 1e-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7_998_052 < 1e-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0_010_011 < 1e-5
    def test_full_loop(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 252.2_682_495) < 1e-2
        assert abs(result_mean.item() - 0.3_284_743) < 1e-3
    def test_full_loop_skip_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(25)
        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)
            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator
            ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 258.2_044_983) < 1e-2
        assert abs(result_mean.item() - 0.3_362_038) < 1e-3
    def test_trained_betas(self):
        # Overridden to skip a common scheduler test that does not apply here.
        pass
    def test_add_noise_device(self):
        # Overridden to skip a common scheduler test that does not apply here.
        pass
| 650
| 0
|
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class FlaxBigBirdModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=2, seq_length=56, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=2, intermediate_size=7, hidden_act="gelu_new", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4, attention_type="block_sparse", use_bias=True, rescale_embeddings=False, block_size=2, num_random_blocks=3):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = BigBirdConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, attention_type=self.attention_type, block_size=self.block_size, num_random_blocks=self.num_random_blocks, use_bias=self.use_bias, rescale_embeddings=self.rescale_embeddings,
        )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_flax
class FlaxBigBirdModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxBigBirdForCausalLM,
            FlaxBigBirdModel,
            FlaxBigBirdForPreTraining,
            FlaxBigBirdForMaskedLM,
            FlaxBigBirdForMultipleChoice,
            FlaxBigBirdForQuestionAnswering,
            FlaxBigBirdForSequenceClassification,
            FlaxBigBirdForTokenClassification,
        )
        if is_flax_available()
        else ()
    )
    test_attn_probs = False
    test_mismatched_shapes = False
    def setUp(self):
        self.model_tester = FlaxBigBirdModelTester(self)
    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_save_pretrained(self):
        super().test_from_pretrained_save_pretrained()
    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_with_no_automatic_init(self):
        super().test_from_pretrained_with_no_automatic_init()
    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_no_automatic_init(self):
        super().test_no_automatic_init()
    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_hidden_states_output(self):
        super().test_hidden_states_output()
@slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/bigbird-roberta-base")
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
        if self.test_attn_probs:
            super().test_attention_outputs()
    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)
                @jax.jit
                def model_jitted(input_ids, attention_mask=None, **kwargs):
                    return model(input_ids=input_ids, attention_mask=attention_mask, **kwargs)
                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None):
        # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`,
        # while the PyTorch version returns them, so attention outputs are not compared.
        if name.startswith("outputs.attentions"):
            return
        else:
            super().check_pt_flax_outputs(fx_outputs, pt_outputs, model_class, tol, name, attributes)
| 712
|
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt')
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""
    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None,
        metadata={"help": "The column name of the images in the files. If not set, will try to use 'image' or 'img'."},
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    mask_patch_size: int = field(default=32, metadata={"help": "The size of the square patches to use for masking."})
    mask_ratio: float = field(
        default=0.6, metadata={"help": "Percentage of patches to mask."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["val"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/image processor we are going to pre-train.
    """

    model_name_or_path: str = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a "
                "checkpoint identifier on the hub. "
                "Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name_or_path: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store (cache) the pretrained models/datasets downloaded from the hub"},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    image_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each image. If not specified, will use `image_size` of the configuration."
            )
        },
    )
    patch_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration."
            )
        },
    )
    encoder_stride: Optional[int] = field(
        default=None,
        metadata={"help": "Stride to use for the encoder."},
    )
class MaskGenerator:
    """
    A class to generate boolean masks for the pretraining task.

    A mask is a 1D tensor of shape (model_patch_size**2,) where the value is either 0 or 1,
    where 1 indicates "masked".
    """

    def __init__(self, input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6):
        self.input_size = input_size
        self.mask_patch_size = mask_patch_size
        self.model_patch_size = model_patch_size
        self.mask_ratio = mask_ratio

        if self.input_size % self.mask_patch_size != 0:
            raise ValueError("Input size must be divisible by mask patch size")
        if self.mask_patch_size % self.model_patch_size != 0:
            raise ValueError("Mask patch size must be divisible by model patch size")

        self.rand_size = self.input_size // self.mask_patch_size
        self.scale = self.mask_patch_size // self.model_patch_size

        self.token_count = self.rand_size**2
        self.mask_count = int(np.ceil(self.token_count * self.mask_ratio))

    def __call__(self):
        mask_idx = np.random.permutation(self.token_count)[: self.mask_count]
        mask = np.zeros(self.token_count, dtype=int)
        mask[mask_idx] = 1

        mask = mask.reshape((self.rand_size, self.rand_size))
        mask = mask.repeat(self.scale, axis=0).repeat(self.scale, axis=1)

        return torch.tensor(mask.flatten())
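# Worked example (not part of the original script): with the defaults above,
# input_size=192 and mask_patch_size=32 give rand_size = 192 // 32 = 6, so
# token_count = 36 coarse positions, of which ceil(36 * 0.6) = 22 are masked.
# model_patch_size=4 gives scale = 32 // 4 = 8, so the upsampled mask has
# (6 * 8) ** 2 = 2304 entries, one per (192 / 4) ** 2 model patch:
#
#     generator = MaskGenerator()
#     mask = generator()
#     assert mask.shape == (2304,) and mask.sum().item() == 22 * 8 * 8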
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    mask = torch.stack([example["mask"] for example in examples])
    return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mim", model_args, data_args)
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]
    # Create config
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name_or_path:
        config = AutoConfig.from_pretrained(model_args.config_name_or_path, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    # make sure the decoder_type is "simmim" (only relevant for BEiT)
    if hasattr(config, "decoder_type"):
        config.decoder_type = "simmim"

    # adapt config
    model_args.image_size = model_args.image_size if model_args.image_size is not None else config.image_size
    model_args.patch_size = model_args.patch_size if model_args.patch_size is not None else config.patch_size
    model_args.encoder_stride = (
        model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
    )

    config.update(
        {
            "image_size": model_args.image_size,
            "patch_size": model_args.patch_size,
            "encoder_stride": model_args.encoder_stride,
        }
    )

    # create image processor
    if model_args.image_processor_name:
        image_processor = AutoImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = AutoImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        IMAGE_PROCESSOR_TYPES = {
            conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
        }
        image_processor = IMAGE_PROCESSOR_TYPES[model_args.model_type]()

    # create model
    if model_args.model_name_or_path:
        model = AutoModelForMaskedImageModeling.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedImageModeling.from_config(config)
    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]

    # transformations as done in original SimMIM paper
    # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(model_args.image_size, scale=(0.67, 1.0), ratio=(3.0 / 4.0, 4.0 / 3.0)),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )

    # create mask generator
    mask_generator = MaskGenerator(
        input_size=model_args.image_size,
        mask_patch_size=data_args.mask_patch_size,
        model_patch_size=model_args.patch_size,
        mask_ratio=data_args.mask_ratio,
    )

    def preprocess_images(examples):
        """Preprocess a batch of images by applying transforms + creating a corresponding mask, indicating
        which patches to mask."""
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        examples["mask"] = [mask_generator() for i in range(len(examples[image_column_name]))]
        return examples

    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "masked-image-modeling",
        "dataset": data_args.dataset_name,
        "tags": ["masked-image-modeling"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
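# Example invocation (a sketch based on the SimMIM example docs; the exact flags
# for your setup may differ):
#
#     python run_mim.py \
#         --model_type vit \
#         --dataset_name cifar10 \
#         --output_dir ./simmim-pretrained \
#         --do_train --do_eval \
#         --remove_unused_columns False \
#         --label_names bool_masked_pos \
#         --mask_ratio 0.6
#
# `--remove_unused_columns False` and `--label_names bool_masked_pos` matter
# here: the mask column is produced on the fly by `preprocess_images`, not by
# the raw dataset, so the Trainer must not strip it.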
| 650
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
    "studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}
class LukeConfig(PretrainedConfig):
    model_type = "luke"

    def __init__(
        self,
        vocab_size=50267,
        entity_vocab_size=500000,
        hidden_size=768,
        entity_emb_size=256,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_entity_aware_attention=True,
        classifier_dropout=None,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        """Constructs LukeConfig."""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
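# Minimal usage sketch (illustrative): the defaults above correspond to the
# "studio-ousia/luke-base" architecture, with a separate 256-dim entity
# embedding table (`entity_emb_size`) projected up to the 768-dim hidden size.
#
#     from transformers import LukeConfig, LukeModel
#     config = LukeConfig()
#     model = LukeModel(config)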
| 713
|
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class LevitImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN,
        image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size["shortest_edge"])
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {"height": output_size[0], "width": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}"
            )
        return resize(
            image, size=(size_dict["height"], size_dict["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, Iterable[float]]] = None,
        image_std: Optional[Union[float, Iterable[float]]] = None,
        return_tensors: Optional[TensorType] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image, size, resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
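# Usage sketch (illustrative; the class name follows the LeViT-style
# "shortest_edge * 256/224" resize rule that this processor implements):
#
#     from PIL import Image
#     import numpy as np
#     processor = LevitImageProcessor()
#     image = Image.fromarray(np.zeros((300, 400, 3), dtype=np.uint8))
#     batch = processor.preprocess(image, return_tensors="pt")
#     # batch["pixel_values"].shape -> (1, 3, 224, 224) after resize + center crop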
| 650
| 0
|
'''simple docstring'''
import os
from collections.abc import Iterator
def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir):
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")


def md_prefix(i):
    return f"{i * '  '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")
if __name__ == "__main__":
print_directory_md('.')
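# Running this from a project root prints a nested Markdown index of the
# tracked source files, e.g. (illustrative output):
#
#     ## Sorts
#       * [Bubble Sort](sorts/bubble_sort.py)
#       * [Quick Sort](sorts/quick_sort.py)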
| 714
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'],
'processing_trocr': ['TrOCRProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_trocr"] = [
'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrOCRForCausalLM',
'TrOCRPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 650
| 0
|
def is_palindrome(n: int) -> bool:
    return str(n) == str(n)[::-1]


def sum_reverse(n: int) -> int:
    return int(n) + int(str(n)[::-1])


def solution(limit: int = 10000) -> int:
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a = num
        while iterations < 50:
            a = sum_reverse(a)
            iterations += 1
            if is_palindrome(a):
                break
        else:
            lychrel_nums.append(num)
    return len(lychrel_nums)
if __name__ == "__main__":
print(F'''{solution() = }''')
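# For example, 349 reaches a palindrome in three reverse-and-add iterations
# (349 + 943 = 1292, 1292 + 2921 = 4213, 4213 + 3124 = 7337), while numbers
# like 196 never produce one within the 50-iteration cap and are counted as
# Lychrel candidates.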
| 715
|
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights_wavaveca(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")

    return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def create_vocab_dict(dict_path):
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]

    num_words = len(words)

    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }

    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
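# The fairseq dictionary file is plain text with one "<token> <count>" pair per
# line, e.g. (made-up counts):
#
#     the 1234567
#     a 987654
#
# create_vocab_dict keeps only the token column and shifts ids by 4 to leave
# room for the <s>/<pad>/</s>/<unk> specials defined above.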
@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    encoder_config_path,
    decoder_config_path,
    vocab_size,
    num_decoder_layers,
):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    encoder_config = WavaVecaConfig.from_pretrained(encoder_config_path)
    decoder_config = SpeechaTextaConfig.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )

    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=True,
    )

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config)
    projection_layer = recursively_load_weights_wavaveca(model.encoder, hf_encoder)

    hf_decoder = SpeechaTextaForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)

    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())

    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wavavec.config.tie_word_embeddings = False

    # add projection layer
    hf_wavavec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wavavec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)

    vocab_dict = create_vocab_dict(dict_path)

    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)

    tokenizer = SpeechaTextaTokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wavavec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"

    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument(
'--encoder_config_path',
default='facebook/wav2vec2-large-lv60',
type=str,
help='Path to hf encoder wav2vec2 checkpoint config',
)
parser.add_argument(
'--decoder_config_path',
default='facebook/s2t-small-mustc-en-fr-st',
type=str,
help='Path to hf decoder s2t checkpoint config',
)
parser.add_argument('--vocab_size', default=1_0_2_2_4, type=int, help='Vocab size of decoder')
parser.add_argument('--num_decoder_layers', default=7, type=int, help='Number of decoder layers')
args = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
| 650
| 0
|
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
logger = logging.get_logger(__name__)
class ReturnType(enum.Enum):
    TENSORS = 0
    TEXT = 1
@add_end_docstrings(PIPELINE_INIT_ARGS)
class Text2TextGenerationPipeline(Pipeline):
    # Used in the return key of the pipeline.
    return_name = "generated"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
        )

    def _sanitize_parameters(
        self,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        truncation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if truncation is not None:
            preprocess_params["truncation"] = truncation

        forward_params = generate_kwargs

        postprocess_params = {}
        if return_tensors is not None and return_type is None:
            return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            postprocess_params["return_type"] = return_type

        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        """
        Checks whether there might be something wrong with given input with regard to the model.
        """
        return True

    def _parse_and_tokenize(self, *args, truncation):
        prefix = self.model.config.prefix if self.model.config.prefix is not None else ""
        if isinstance(args[0], list):
            if self.tokenizer.pad_token_id is None:
                raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input")
            args = ([prefix + arg for arg in args[0]],)
            padding = True
        elif isinstance(args[0], str):
            args = (prefix + args[0],)
            padding = False
        else:
            raise ValueError(
                f" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`"
            )
        inputs = self.tokenizer(*args, padding=padding, truncation=truncation, return_tensors=self.framework)
        # This is produced by tokenizers but is an invalid generate kwargs
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs

    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        if (
            isinstance(args[0], list)
            and all(isinstance(el, str) for el in args[0])
            and all(len(res) == 1 for res in result)
        ):
            return [res[0] for res in result]
        return result

    def preprocess(self, inputs, truncation=TruncationStrategy.DO_NOT_TRUNCATE, **kwargs):
        inputs = self._parse_and_tokenize(inputs, truncation=truncation, **kwargs)
        return inputs

    def _forward(self, model_inputs, **generate_kwargs):
        if self.framework == "pt":
            in_b, input_length = model_inputs["input_ids"].shape
        elif self.framework == "tf":
            in_b, input_length = tf.shape(model_inputs["input_ids"]).numpy()

        generate_kwargs["min_length"] = generate_kwargs.get("min_length", self.model.config.min_length)
        generate_kwargs["max_length"] = generate_kwargs.get("max_length", self.model.config.max_length)
        self.check_inputs(input_length, generate_kwargs["min_length"], generate_kwargs["max_length"])
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        out_b = output_ids.shape[0]
        if self.framework == "pt":
            output_ids = output_ids.reshape(in_b, out_b // in_b, *output_ids.shape[1:])
        elif self.framework == "tf":
            output_ids = tf.reshape(output_ids, (in_b, out_b // in_b, *output_ids.shape[1:]))
        return {"output_ids": output_ids}

    def postprocess(self, model_outputs, return_type=ReturnType.TEXT, clean_up_tokenization_spaces=False):
        records = []
        for output_ids in model_outputs["output_ids"][0]:
            if return_type == ReturnType.TENSORS:
                record = {f"{self.return_name}_token_ids": output_ids}
            elif return_type == ReturnType.TEXT:
                record = {
                    f"{self.return_name}_text": self.tokenizer.decode(
                        output_ids,
                        skip_special_tokens=True,
                        clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                    )
                }
            records.append(record)
        return records
@add_end_docstrings(PIPELINE_INIT_ARGS)
class SummarizationPipeline(Text2TextGenerationPipeline):
    # Used in the return key of the pipeline.
    return_name = "summary"

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)

    def check_inputs(self, input_length: int, min_length: int, max_length: int) -> bool:
        if max_length < min_length:
            logger.warning(f"Your min_length={min_length} must be inferior than your max_length={max_length}.")
        if input_length < max_length:
            logger.warning(
                f"Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is "
                "a summarization task, where outputs shorter than the input are typically wanted, you might "
                f"consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})"
            )
@add_end_docstrings(PIPELINE_INIT_ARGS)
class TranslationPipeline(Text2TextGenerationPipeline):
    # Used in the return key of the pipeline.
    return_name = "translation"

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        if input_length > 0.9 * max_length:
            logger.warning(
                f"Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider "
                "increasing your max_length manually, e.g. translator('...', max_length=400)"
            )
        return True

    def preprocess(self, *args, truncation=TruncationStrategy.DO_NOT_TRUNCATE, src_lang=None, tgt_lang=None):
        if getattr(self.tokenizer, "_build_translation_inputs", None):
            return self.tokenizer._build_translation_inputs(
                *args, return_tensors=self.framework, truncation=truncation, src_lang=src_lang, tgt_lang=tgt_lang
            )
        else:
            return super()._parse_and_tokenize(*args, truncation=truncation)

    def _sanitize_parameters(self, src_lang=None, tgt_lang=None, **kwargs):
        preprocess_params, forward_params, postprocess_params = super()._sanitize_parameters(**kwargs)
        if src_lang is not None:
            preprocess_params["src_lang"] = src_lang
        if tgt_lang is not None:
            preprocess_params["tgt_lang"] = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            task = kwargs.get("task", self.task)
            items = task.split("_")
            if task and len(items) == 4:
                # translation, XX, to YY
                preprocess_params["src_lang"] = items[1]
                preprocess_params["tgt_lang"] = items[3]
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
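# Usage sketch (illustrative model names; any seq2seq checkpoint works):
#
#     from transformers import pipeline
#     summarizer = pipeline("summarization", model="sshleifer/distilbart-cnn-12-6")
#     summarizer("Long article text ...", max_length=60)
#     # -> [{"summary_text": "..."}]
#
#     translator = pipeline("translation_en_to_fr", model="t5-small")
#     translator("How old are you?")
#     # -> [{"translation_text": "..."}]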
| 716
|
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/esm-1b': 'https://huggingface.co/facebook/esm-1b/resolve/main/config.json',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig):
    model_type = "esm"

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")

    def to_dict(self):
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0

    embed_aa: bool = True
    bypass_lm: bool = False

    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                f" {self.sequence_state_dim} and {self.sequence_head_width}."
            )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                f" {self.pairwise_state_dim} and {self.pairwise_head_width}."
            )

        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width

        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}."
            )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}."
            )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")

        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")

    def to_dict(self):
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)
def get_default_vocab_list():
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
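# Note: the tuple above lists 33 tokens in the canonical ESM-2 order
# (4 leading specials, the 20 standard amino acids, ambiguous/rare residues,
# gap characters, then <null_1> and <mask>), matching ESM-2's vocab_size of 33.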
| 650
| 0
|
def solution(limit: int = 1000000) -> int:
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    phi = [float(n) for n in range(limit + 1)]

    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))
if __name__ == "__main__":
print(F'''{solution() = }''')
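# This is a sieve over Euler's totient function: phi[n] starts at n and is
# multiplied by (1 - 1/p) for every prime p dividing n. As a small sanity
# check, sum(phi(n) for n in 2..8) = 1 + 2 + 2 + 4 + 2 + 6 + 4 = 21, the number
# of reduced proper fractions with denominator at most 8.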
| 717
|
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("env")
    else:
        parser = argparse.ArgumentParser("Accelerate env command")

    parser.add_argument(
        "--config_file", default=None, help="The config file to use for the default values in the launching script."
    )

    if subparsers is not None:
        parser.set_defaults(func=env_command)
    return parser


def env_command(args):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()

    accelerate_config = "Not found"
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()

    info = {
        "`Accelerate` version": version,
        "Platform": platform.platform(),
        "Python version": platform.python_version(),
        "Numpy version": np.__version__,
        "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
        "PyTorch XPU available": str(pt_xpu_available),
        "PyTorch NPU available": str(pt_npu_available),
        "System RAM": f"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
    }
    if pt_cuda_available:
        info["GPU type"] = torch.cuda.get_device_name()

    print("\nCopy-and-paste the text below in your GitHub issue\n")
    print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]))

    print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:")
    accelerate_config_str = (
        "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else f"\t{accelerate_config}"
    )
    print(accelerate_config_str)

    info["`Accelerate` configs"] = accelerate_config

    return info


def main() -> int:
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
    return 0
if __name__ == "__main__":
raise SystemExit(main())
| 650
| 0
|
from sklearn.metrics import f1_score
import datasets
_DESCRIPTION = '\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n'
_KWARGS_DESCRIPTION = '\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n\n - \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {\'f1\': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results[\'f1\'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results[\'f1\'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")\n >>> print(round(results[\'f1\'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {\'f1\': array([0.8, 0. , 0. ])}\n'
a = '\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase__ ( datasets.Metric ):
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""int32""" ) ),
"""references""": datasets.Sequence(datasets.Value("""int32""" ) ),
}
if self.config_name == """multilabel"""
else {
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"""] , )
def UpperCAmelCase__ ( self : List[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict=None , UpperCamelCase__ : Optional[Any]=1 , UpperCamelCase__ : List[str]="binary" , UpperCamelCase__ : str=None ):
'''simple docstring'''
lowercase_ = fa_score(
            UpperCamelCase__ , UpperCamelCase__ , labels=UpperCamelCase__ , pos_label=UpperCamelCase__ , average=UpperCamelCase__ , sample_weight=UpperCamelCase__ )
        return {"f1": float(UpperCamelCase__ ) if score.size == 1 else score}
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class UpperCamelCase__ :
def __init__( self : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple=13 , UpperCamelCase__ : Tuple=30 , UpperCamelCase__ : List[Any]=2 , UpperCamelCase__ : Union[str, Any]=3 , UpperCamelCase__ : Union[str, Any]=True , UpperCamelCase__ : Tuple=True , UpperCamelCase__ : int=32 , UpperCamelCase__ : Tuple=5 , UpperCamelCase__ : List[Any]=4 , UpperCamelCase__ : Dict=37 , UpperCamelCase__ : Tuple="gelu" , UpperCamelCase__ : Union[str, Any]=0.1 , UpperCamelCase__ : Optional[int]=0.1 , UpperCamelCase__ : Any=10 , UpperCamelCase__ : Optional[int]=0.02 , UpperCamelCase__ : Dict=3 , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : Union[str, Any]=2 , ):
'''simple docstring'''
lowercase_ = parent
lowercase_ = batch_size
lowercase_ = image_size
lowercase_ = patch_size
lowercase_ = num_channels
lowercase_ = is_training
lowercase_ = use_labels
lowercase_ = hidden_size
lowercase_ = num_hidden_layers
lowercase_ = num_attention_heads
lowercase_ = intermediate_size
lowercase_ = hidden_act
lowercase_ = hidden_dropout_prob
lowercase_ = attention_probs_dropout_prob
lowercase_ = type_sequence_label_size
lowercase_ = initializer_range
lowercase_ = scope
lowercase_ = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
lowercase_ = (image_size // patch_size) ** 2
lowercase_ = num_patches + 2
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase_ = None
if self.use_labels:
lowercase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase_ = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def UpperCAmelCase__ ( self : Tuple , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple ):
'''simple docstring'''
lowercase_ = DeiTModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowercase_ = model(UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase__ ( self : Optional[int] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : str ):
'''simple docstring'''
lowercase_ = DeiTForMaskedImageModeling(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowercase_ = model(UpperCamelCase__ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowercase_ = 1
lowercase_ = DeiTForMaskedImageModeling(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowercase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase_ = model(UpperCamelCase__ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def UpperCAmelCase__ ( self : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] ):
'''simple docstring'''
lowercase_ = self.type_sequence_label_size
lowercase_ = DeiTForImageClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowercase_ = model(UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowercase_ = 1
lowercase_ = DeiTForImageClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowercase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase_ = model(UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
lowercase_ = self.prepare_config_and_inputs()
        lowercase_ , lowercase_ , lowercase_ = config_and_inputs
lowercase_ = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Any = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
__SCREAMING_SNAKE_CASE : str = (
{
'feature-extraction': DeiTModel,
'image-classification': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
__SCREAMING_SNAKE_CASE : Dict = False
__SCREAMING_SNAKE_CASE : List[Any] = False
__SCREAMING_SNAKE_CASE : List[Any] = False
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ = DeiTModelTester(self )
lowercase_ = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""DeiT does not use inputs_embeds""" )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
pass
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ = model_class(UpperCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) )
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ = model_class(UpperCamelCase__ )
lowercase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ = [*signature.parameters.keys()]
lowercase_ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCamelCase__ )
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )
def UpperCAmelCase__ ( self : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any]=False ):
'''simple docstring'''
lowercase_ = super()._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
if not self.model_tester.is_training:
return
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(UpperCamelCase__ )
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
lowercase_ = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.train()
lowercase_ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
lowercase_ = model(**UpperCamelCase__ ).loss
loss.backward()
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
lowercase_ = False
lowercase_ = True
for model_class in self.all_model_classes:
if model_class in get_values(UpperCamelCase__ ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
lowercase_ = model_class(UpperCamelCase__ )
model.gradient_checkpointing_enable()
model.to(UpperCamelCase__ )
model.train()
lowercase_ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
lowercase_ = model(**UpperCamelCase__ ).loss
loss.backward()
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ = [
{"""title""": """multi_label_classification""", """num_labels""": 2, """dtype""": torch.float},
{"""title""": """single_label_classification""", """num_labels""": 1, """dtype""": torch.long},
{"""title""": """regression""", """num_labels""": 1, """dtype""": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(UpperCamelCase__ ),
*get_values(UpperCamelCase__ ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F'''Testing {model_class} with {problem_type["title"]}''' ):
lowercase_ = problem_type["""title"""]
lowercase_ = problem_type["""num_labels"""]
lowercase_ = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.train()
lowercase_ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
if problem_type["num_labels"] > 1:
lowercase_ = inputs["""labels"""].unsqueeze(1 ).repeat(1 , problem_type["""num_labels"""] )
lowercase_ = inputs["""labels"""].to(problem_type["""dtype"""] )
                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size.", which is a symptom that something is wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=UpperCamelCase__ ) as warning_list:
lowercase_ = model(**UpperCamelCase__ ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F'''Something is going wrong in the regression problem: intercepted {w.message}''' )
loss.backward()
@slow
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ = DeiTModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def UpperCAmelCase_ ( ):
lowercase_ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class UpperCamelCase__ ( unittest.TestCase ):
@cached_property
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
return (
DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
lowercase_ = DeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" ).to(
UpperCamelCase__ )
lowercase_ = self.default_image_processor
lowercase_ = prepare_img()
lowercase_ = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
lowercase_ = model(**UpperCamelCase__ )
# verify the logits
lowercase_ = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
lowercase_ = torch.tensor([-1.0_266, 0.1_912, -1.2_861] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase_ = DeiTModel.from_pretrained(
"""facebook/deit-base-distilled-patch16-224""" , torch_dtype=torch.floataa , device_map="""auto""" )
lowercase_ = self.default_image_processor
lowercase_ = prepare_img()
lowercase_ = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" )
lowercase_ = inputs.pixel_values.to(UpperCamelCase__ )
# forward pass to make sure inference works in fp16
with torch.no_grad():
lowercase_ = model(UpperCamelCase__ )
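# Minimal standalone inference sketch mirroring the integration tests above: a
# hedged example, assuming hub access to the same
# `facebook/deit-base-distilled-patch16-224` checkpoint and the local COCO
# fixture image used by the tests (the path is this repo's convention).
if __name__ == "__main__":
    import torch
    from PIL import Image
    from transformers import DeiTForImageClassificationWithTeacher, DeiTImageProcessor
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    processor = DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
    model = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits  # shape (1, 1000)
    print(model.config.id2label[int(logits.argmax(-1))])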
def UpperCAmelCase_ ( UpperCAmelCase__ = 1_0_0 ):
    lowercase_ = set()  # collects the distinct powers
    for a in range(2 , UpperCAmelCase__ + 1 ):
        for b in range(2 , UpperCAmelCase__ + 1 ):
            lowercase_.add(a**b )  # calculates the current power a**b and adds it to the set
    return len(lowercase_ )
if __name__ == "__main__":
    print('Number of terms ', UpperCAmelCase_(int(input().strip())))
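# Quick self-check for the solution above (Project Euler problem 29): with the
# default limit of 100 the published answer is 9183, and the tiny case is easy
# to verify by hand (for 2 <= a, b <= 5 only 2**4 and 4**2 coincide, leaving 15
# distinct powers out of 16 pairs).
#
#   >>> UpperCAmelCase_(5)
#   15
#   >>> UpperCAmelCase_(100)
#   9183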
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
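# Minimal usage sketch for the pipelines exported above: a hedged example; the
# checkpoint ids ("lllyasviel/sd-controlnet-canny", "runwayml/stable-diffusion-v1-5")
# and the `canny_image` conditioning input are illustrative assumptions, not
# pinned by this module.
#
#   from diffusers import ControlNetModel, StableDiffusionControlNetPipeline
#   controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
#   pipe = StableDiffusionControlNetPipeline.from_pretrained(
#       "runwayml/stable-diffusion-v1-5", controlnet=controlnet
#   )
#   image = pipe("a bird on a branch", image=canny_image).images[0]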
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a = logging.get_logger(__name__)
a = {
'microsoft/trocr-base-handwritten': (
'https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class UpperCamelCase__ ( __magic_name__ ):
__SCREAMING_SNAKE_CASE : List[Any] = 'trocr'
__SCREAMING_SNAKE_CASE : Any = ['past_key_values']
__SCREAMING_SNAKE_CASE : List[Any] = {
'num_attention_heads': 'decoder_attention_heads',
'hidden_size': 'd_model',
'num_hidden_layers': 'decoder_layers',
}
def __init__( self : Dict , UpperCamelCase__ : str=50_265 , UpperCamelCase__ : List[str]=1_024 , UpperCamelCase__ : int=12 , UpperCamelCase__ : List[str]=16 , UpperCamelCase__ : Union[str, Any]=4_096 , UpperCamelCase__ : List[str]="gelu" , UpperCamelCase__ : List[str]=512 , UpperCamelCase__ : Union[str, Any]=0.1 , UpperCamelCase__ : Tuple=0.0 , UpperCamelCase__ : Union[str, Any]=0.0 , UpperCamelCase__ : List[Any]=2 , UpperCamelCase__ : str=0.02 , UpperCamelCase__ : Union[str, Any]=0.0 , UpperCamelCase__ : Dict=True , UpperCamelCase__ : Optional[Any]=False , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : Dict=True , UpperCamelCase__ : Dict=1 , UpperCamelCase__ : str=0 , UpperCamelCase__ : Optional[Any]=2 , **UpperCamelCase__ : Any , ):
'''simple docstring'''
lowercase_ = vocab_size
lowercase_ = d_model
lowercase_ = decoder_layers
lowercase_ = decoder_attention_heads
lowercase_ = decoder_ffn_dim
lowercase_ = activation_function
lowercase_ = max_position_embeddings
lowercase_ = dropout
lowercase_ = attention_dropout
lowercase_ = activation_dropout
lowercase_ = init_std
lowercase_ = decoder_layerdrop
lowercase_ = use_cache
lowercase_ = scale_embedding
lowercase_ = use_learned_position_embeddings
lowercase_ = layernorm_embedding
super().__init__(
            pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , decoder_start_token_id=UpperCamelCase__ , **UpperCamelCase__ , )
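# Minimal usage sketch for the configuration above: a hedged example, assuming
# the standard `transformers` pairing of this decoder config with
# `TrOCRForCausalLM` (random weights; no checkpoint download required).
if __name__ == "__main__":
    from transformers import TrOCRConfig, TrOCRForCausalLM
    configuration = TrOCRConfig()  # default "trocr"-style hyper-parameters
    model = TrOCRForCausalLM(configuration)  # randomly initialised decoder
    print(model.config.d_model)  # 1024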
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
a = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class UpperCamelCase__ ( __magic_name__ , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[Any] = XGLMTokenizer
__SCREAMING_SNAKE_CASE : List[Any] = XGLMTokenizerFast
__SCREAMING_SNAKE_CASE : List[Any] = True
__SCREAMING_SNAKE_CASE : int = True
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
lowercase_ = XGLMTokenizer(UpperCamelCase__ , keep_accents=UpperCamelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
lowercase_ = """<pad>"""
lowercase_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase__ ) , UpperCamelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase__ ) , UpperCamelCase__ )
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
lowercase_ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(len(UpperCamelCase__ ) , 1_008 )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1_008 )
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
lowercase_ = XGLMTokenizer(UpperCamelCase__ , keep_accents=UpperCamelCase__ )
lowercase_ = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(UpperCamelCase__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
lowercase_ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
UpperCamelCase__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
lowercase_ = tokenizer.convert_tokens_to_ids(UpperCamelCase__ )
self.assertListEqual(
UpperCamelCase__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
lowercase_ = tokenizer.convert_ids_to_tokens(UpperCamelCase__ )
self.assertListEqual(
UpperCamelCase__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
return XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(UpperCamelCase__ , f.name )
lowercase_ = XGLMTokenizer(f.name , keep_accents=UpperCamelCase__ )
lowercase_ = pickle.dumps(UpperCamelCase__ )
pickle.loads(UpperCamelCase__ )
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
lowercase_ = self.get_tokenizer()
lowercase_ = self.get_rust_tokenizer()
lowercase_ = """I was born in 92000, and this is falsé."""
lowercase_ = tokenizer.tokenize(UpperCamelCase__ )
lowercase_ = rust_tokenizer.tokenize(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
lowercase_ = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
lowercase_ = rust_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
lowercase_ = self.get_rust_tokenizer()
lowercase_ = tokenizer.encode(UpperCamelCase__ )
lowercase_ = rust_tokenizer.encode(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
@slow
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ = """Hello World!"""
lowercase_ = [2, 31_227, 4_447, 35]
self.assertListEqual(UpperCamelCase__ , self.big_tokenizer.encode(UpperCamelCase__ ) )
@slow
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
lowercase_ = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"""
)
# fmt: off
lowercase_ = [2, 1_018, 67, 11, 1_988, 2_617, 5_631, 278, 11, 3_407, 48, 71_630, 28_085, 4, 3_234, 157, 13, 6, 5, 6, 4, 3_526, 768, 15, 659, 57, 298, 3_983, 864, 129, 21, 6, 5, 13_675, 377, 652, 7_580, 10_341, 155, 2_817, 422, 1_666, 7, 1_674, 53, 113, 202_277, 17_892, 33, 60, 87, 4, 3_234, 157, 61, 2_667, 52_376, 19, 88, 23, 735]
# fmt: on
self.assertListEqual(UpperCamelCase__ , self.big_tokenizer.encode(UpperCamelCase__ ) )
@slow
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
        # fmt: off
        lowercase_ = {
"""input_ids""": [[2, 108_825, 1_163, 15, 88_010, 473, 15_898, 157, 13_672, 1_857, 312, 8, 238_021, 1_163, 53, 13_672, 1_857, 312, 8, 53_283, 182_396, 8, 18_566, 16, 36_733, 4_101, 8, 230, 244_017, 122_553, 7, 15, 132_597, 4, 293, 12_511, 7_610, 4, 3_414, 132_597, 9, 4, 32_361, 362, 4, 734, 28_512, 32_569, 18, 4, 32_361, 26_096, 14_982, 73, 18_715, 21_433, 235_261, 15, 492, 12_427, 16, 53, 18_715, 21_433, 65_454, 15, 23_659, 563, 16, 278, 597, 2_843, 595, 7_931, 182_396, 64_186, 22, 886, 595, 132_981, 53, 25_540, 3_449, 43_982, 39_901, 5_951, 878, 330, 4, 27_694, 80_269, 312, 53, 6_517, 11_780, 611, 20_408, 5], [2, 6, 132_597, 67, 42_897, 33, 592, 8, 163_729, 25_540, 361, 136_997, 109_514, 173_230, 7, 501, 60, 102_913, 196, 5_631, 235, 63_243, 473, 6, 231_757, 74, 5_277, 7_905, 53, 3_095, 37_317, 22, 454, 183_874, 5], [2, 268, 31_298, 46_530, 6, 132_935, 43_831, 7, 597, 32, 24, 3_688, 9_865, 5]],
"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase__ , model_name="""facebook/xglm-564M""" , padding=UpperCamelCase__ , )
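# Minimal usage sketch mirroring the slow tests above: a hedged example,
# assuming hub access to `facebook/xglm-564M`; the expected ids are the ones
# asserted in the "Hello World!" test.
if __name__ == "__main__":
    from transformers import XGLMTokenizer
    tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
    print(tokenizer.encode("Hello World!"))  # expected: [2, 31227, 4447, 35]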
'''simple docstring'''
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
lowercase_ = BigBirdConfig.from_json_file(UpperCAmelCase__ )
print(F'''Building PyTorch model from configuration: {config}''' )
if is_trivia_qa:
lowercase_ = BigBirdForQuestionAnswering(UpperCAmelCase__ )
else:
lowercase_ = BigBirdForPreTraining(UpperCAmelCase__ )
# Load weights from tf checkpoint
load_tf_weights_in_big_bird(UpperCAmelCase__ , UpperCAmelCase__ , is_trivia_qa=UpperCAmelCase__ )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(UpperCAmelCase__ )
if __name__ == "__main__":
a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--big_bird_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--is_trivia_qa', action='store_true', help='Whether to convert a model with a trivia_qa head.'
)
a = parser.parse_args()
    UpperCAmelCase_(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
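# Example invocation of the conversion script above (all paths are illustrative
# placeholders, and the script file name is an assumption):
#
#   python convert_bigbird_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/bigbird/model.ckpt \
#       --big_bird_config_file /path/to/bigbird/config.json \
#       --pytorch_dump_path /path/to/output_dir \
#       --is_trivia_qa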
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
a = None
a = logging.get_logger(__name__)
a = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
a = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
},
'tokenizer_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/tokenizer.json',
't5-base': 'https://huggingface.co/t5-base/resolve/main/tokenizer.json',
't5-large': 'https://huggingface.co/t5-large/resolve/main/tokenizer.json',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/tokenizer.json',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/tokenizer.json',
},
}
# TODO(PVP) - this should be removed in Transformers v5
a = {
't5-small': 5_1_2,
't5-base': 5_1_2,
't5-large': 5_1_2,
't5-3b': 5_1_2,
't5-11b': 5_1_2,
}
class UpperCamelCase__ ( __magic_name__ ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE : int = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE : str = ['input_ids', 'attention_mask']
__SCREAMING_SNAKE_CASE : Dict = TaTokenizer
__SCREAMING_SNAKE_CASE : List[int] = []
def __init__( self : int , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : Any=None , UpperCamelCase__ : Dict="</s>" , UpperCamelCase__ : Tuple="<unk>" , UpperCamelCase__ : Optional[Any]="<pad>" , UpperCamelCase__ : Union[str, Any]=100 , UpperCamelCase__ : Optional[Any]=None , **UpperCamelCase__ : List[str] , ):
'''simple docstring'''
if extra_ids > 0 and additional_special_tokens is None:
lowercase_ = [F'''<extra_id_{i}>''' for i in range(UpperCamelCase__ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
lowercase_ = len(set(filter(lambda UpperCamelCase__ : bool("""extra_id_""" in str(UpperCamelCase__ ) ) , UpperCamelCase__ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
""" provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"""
""" tokens""" )
super().__init__(
UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , extra_ids=UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , **UpperCamelCase__ , )
lowercase_ = vocab_file
lowercase_ = False if not self.vocab_file else True
lowercase_ = extra_ids
@staticmethod
def UpperCAmelCase__ ( UpperCamelCase__ : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : int ):
'''simple docstring'''
if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
lowercase_ = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
"""This tokenizer was incorrectly instantiated with a model max length of"""
F''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
""" behavior is kept to avoid breaking backwards compatibility when padding/encoding with"""
""" `truncation is True`.\n- Be aware that you SHOULD NOT rely on"""
F''' {pretrained_model_name_or_path} automatically truncating your input to'''
F''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
F''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
""" `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"""
""" instantiate this tokenizer with `model_max_length` set to your preferred value.""" , UpperCamelCase__ , )
return max_model_length
def UpperCAmelCase__ ( self : int , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(UpperCamelCase__ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowercase_ = os.path.join(
UpperCamelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ):
copyfile(self.vocab_file , UpperCamelCase__ )
logger.info(F'''Copy vocab file to {out_vocab_file}''' )
return (out_vocab_file,)
def UpperCAmelCase__ ( self : int , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
lowercase_ = token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return self.prefix_tokens + token_ids_a
else:
lowercase_ = token_ids_a + [self.eos_token_id]
return self.prefix_tokens + token_ids_a + token_ids_a
def UpperCAmelCase__ ( self : str , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
lowercase_ = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
return list(
set(filter(lambda UpperCamelCase__ : bool(re.search(R"""<extra_id_\d+>""" , UpperCamelCase__ ) ) is not None , self.additional_special_tokens ) ) )
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
return [self.convert_tokens_to_ids(UpperCamelCase__ ) for token in self.get_sentinel_tokens()]
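# Minimal usage sketch for the fast tokenizer above: a hedged example, assuming
# hub access to the `t5-small` checkpoint; `<extra_id_0>` is the first of the
# 100 default sentinel tokens wired up by the `extra_ids` logic in __init__.
if __name__ == "__main__":
    from transformers import T5TokenizerFast
    tokenizer = T5TokenizerFast.from_pretrained("t5-small")
    encoding = tokenizer("The <extra_id_0> walks in <extra_id_1> park")
    print(encoding.input_ids)  # ends with the </s> eos id appended by the special-tokens logic
    print(tokenizer.get_sentinel_tokens()[:3])  # three sentinel tokens (order not guaranteed)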
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class UpperCamelCase__ :
def __init__( self : Any , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any=2 , UpperCamelCase__ : Union[str, Any]=True , UpperCamelCase__ : List[Any]=False , UpperCamelCase__ : Any=10 , UpperCamelCase__ : List[Any]=3 , UpperCamelCase__ : int=32 * 8 , UpperCamelCase__ : Union[str, Any]=32 * 8 , UpperCamelCase__ : int=4 , UpperCamelCase__ : Any=64 , ):
'''simple docstring'''
lowercase_ = parent
lowercase_ = batch_size
lowercase_ = is_training
lowercase_ = use_auxiliary_loss
lowercase_ = num_queries
lowercase_ = num_channels
lowercase_ = min_size
lowercase_ = max_size
lowercase_ = num_labels
lowercase_ = hidden_dim
lowercase_ = hidden_dim
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
lowercase_ = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
UpperCAmelCase__ )
lowercase_ = torch.ones([self.batch_size, self.min_size, self.max_size] , device=UpperCAmelCase__ )
lowercase_ = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=UpperCAmelCase__ ) > 0.5
).float()
lowercase_ = (torch.rand((self.batch_size, self.num_labels) , device=UpperCAmelCase__ ) > 0.5).long()
lowercase_ = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
lowercase_ = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
lowercase_ = self.num_queries
lowercase_ = self.num_labels
lowercase_ = [1, 1, 1, 1]
lowercase_ = self.num_channels
lowercase_ = 64
lowercase_ = 128
lowercase_ = self.hidden_dim
lowercase_ = self.hidden_dim
lowercase_ = self.hidden_dim
return config
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ = self.prepare_config_and_inputs()
lowercase_ = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
return config, inputs_dict
def UpperCAmelCase__ ( self : Dict , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
lowercase_ = output.encoder_hidden_states
lowercase_ = output.pixel_decoder_hidden_states
lowercase_ = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(UpperCAmelCase__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(UpperCAmelCase__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(UpperCAmelCase__ ) , config.decoder_layers )
def UpperCAmelCase__ ( self : Optional[int] , UpperCamelCase__ : Dict , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple , UpperCamelCase__ : int=False ):
'''simple docstring'''
with torch.no_grad():
lowercase_ = MaskaFormerModel(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowercase_ = model(pixel_values=UpperCAmelCase__ , pixel_mask=UpperCAmelCase__ )
lowercase_ = model(UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
        # let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(UpperCAmelCase__ , UpperCAmelCase__ )
def UpperCAmelCase__ ( self : List[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Dict , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[str] ):
'''simple docstring'''
lowercase_ = MaskaFormerForUniversalSegmentation(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
def comm_check_on_output(UpperCamelCase__ : Union[str, Any] ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
lowercase_ = model(pixel_values=UpperCAmelCase__ , pixel_mask=UpperCAmelCase__ )
lowercase_ = model(UpperCAmelCase__ )
comm_check_on_output(UpperCAmelCase__ )
lowercase_ = model(
pixel_values=UpperCAmelCase__ , pixel_mask=UpperCAmelCase__ , mask_labels=UpperCAmelCase__ , class_labels=UpperCAmelCase__ )
comm_check_on_output(UpperCAmelCase__ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class UpperCamelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Optional[int] = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
__SCREAMING_SNAKE_CASE : Any = {'feature-extraction': MaskaFormerModel} if is_torch_available() else {}
__SCREAMING_SNAKE_CASE : Optional[int] = False
__SCREAMING_SNAKE_CASE : List[str] = False
__SCREAMING_SNAKE_CASE : Optional[int] = False
__SCREAMING_SNAKE_CASE : List[Any] = False
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ = MaskaFormerModelTester(self )
lowercase_ = ConfigTester(self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ )
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(UpperCAmelCase__ , **UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*UpperCAmelCase__ )
@unittest.skip(reason="""Mask2Former does not use inputs_embeds""" )
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
pass
@unittest.skip(reason="""Mask2Former does not have a get_input_embeddings method""" )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="""Mask2Former is not a generative model""" )
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip(reason="""Mask2Former does not use token embeddings""" )
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason="""Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`""" )
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
pass
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ = model_class(UpperCAmelCase__ )
lowercase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ = [*signature.parameters.keys()]
lowercase_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCAmelCase__ )
@slow
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
lowercase_ = MaskaFormerModel.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
lowercase_ = (self.model_tester.min_size,) * 2
lowercase_ = {
'''pixel_values''': torch.randn((2, 3, *size) , device=UpperCAmelCase__ ),
'''mask_labels''': torch.randn((2, 10, *size) , device=UpperCAmelCase__ ),
'''class_labels''': torch.zeros(2 , 10 , device=UpperCAmelCase__ ).long(),
}
lowercase_ = self.model_tester.get_config()
lowercase_ = MaskaFormerForUniversalSegmentation(UpperCAmelCase__ ).to(UpperCAmelCase__ )
lowercase_ = model(**UpperCAmelCase__ )
self.assertTrue(outputs.loss is not None )
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(UpperCAmelCase__ , **UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ )
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ = model_class(UpperCAmelCase__ ).to(UpperCAmelCase__ )
lowercase_ = model(**UpperCAmelCase__ , output_attentions=UpperCAmelCase__ )
self.assertTrue(outputs.attentions is not None )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
if not self.model_tester.is_training:
return
lowercase_ = self.all_model_classes[1]
lowercase_ = self.model_tester.prepare_config_and_inputs()
lowercase_ = model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.train()
lowercase_ = model(UpperCAmelCase__ , mask_labels=UpperCAmelCase__ , class_labels=UpperCAmelCase__ ).loss
loss.backward()
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ = self.all_model_classes[1]
lowercase_ = self.model_tester.prepare_config_and_inputs()
lowercase_ = True
lowercase_ = True
lowercase_ = model_class(UpperCAmelCase__ ).to(UpperCAmelCase__ )
model.train()
lowercase_ = model(UpperCAmelCase__ , mask_labels=UpperCAmelCase__ , class_labels=UpperCAmelCase__ )
lowercase_ = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
lowercase_ = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
lowercase_ = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
lowercase_ = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=UpperCAmelCase__ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
a = 1E-4
def UpperCAmelCase_ ( ):
lowercase_ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_vision
@slow
class UpperCamelCase__ ( unittest.TestCase ):
@cached_property
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
lowercase_ = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(UpperCAmelCase__ )
lowercase_ = self.default_image_processor
lowercase_ = prepare_img()
lowercase_ = image_processor(UpperCAmelCase__ , return_tensors="""pt""" ).to(UpperCAmelCase__ )
lowercase_ = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(UpperCAmelCase__ , (1, 3, 384, 384) )
with torch.no_grad():
lowercase_ = model(**UpperCAmelCase__ )
lowercase_ = torch.tensor(
[[-0.2_790, -1.0_717, -1.1_668], [-0.5_128, -0.3_128, -0.4_987], [-0.5_832, 0.1_971, -0.0_197]] ).to(UpperCAmelCase__ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , UpperCAmelCase__ , atol=UpperCAmelCase__ ) )
lowercase_ = torch.tensor(
[[0.8_973, 1.1_847, 1.1_776], [1.1_934, 1.5_040, 1.5_128], [1.1_153, 1.4_486, 1.4_951]] ).to(UpperCAmelCase__ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , UpperCAmelCase__ , atol=UpperCAmelCase__ ) )
lowercase_ = torch.tensor(
[[2.1_152, 1.7_000, -0.8_603], [1.5_808, 1.8_004, -0.9_353], [1.6_043, 1.7_495, -0.5_999]] ).to(UpperCAmelCase__ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , UpperCAmelCase__ , atol=UpperCAmelCase__ ) )
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(UpperCAmelCase__ ).eval()
lowercase_ = self.default_image_processor
lowercase_ = prepare_img()
lowercase_ = image_processor(UpperCAmelCase__ , return_tensors="""pt""" ).to(UpperCAmelCase__ )
lowercase_ = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(UpperCAmelCase__ , (1, 3, 384, 384) )
with torch.no_grad():
lowercase_ = model(**UpperCAmelCase__ )
# masks_queries_logits
lowercase_ = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
lowercase_ = [
[-8.7_839, -9.0_056, -8.8_121],
[-7.4_104, -7.0_313, -6.5_401],
[-6.6_105, -6.3_427, -6.4_675],
]
lowercase_ = torch.tensor(UpperCAmelCase__ ).to(UpperCAmelCase__ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , UpperCAmelCase__ , atol=UpperCAmelCase__ ) )
# class_queries_logits
lowercase_ = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
lowercase_ = torch.tensor(
[
[1.8_324, -8.0_835, -4.1_922],
[0.8_450, -9.0_050, -3.6_053],
[0.3_045, -7.7_293, -3.0_275],
] ).to(UpperCAmelCase__ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , UpperCAmelCase__ , atol=UpperCAmelCase__ ) )
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
lowercase_ = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(UpperCAmelCase__ ).eval()
lowercase_ = self.default_image_processor
lowercase_ = image_processor(
[np.zeros((3, 800, 1_333) ), np.zeros((3, 800, 1_333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors="""pt""" , )
lowercase_ = inputs['''pixel_values'''].to(UpperCAmelCase__ )
lowercase_ = [el.to(UpperCAmelCase__ ) for el in inputs['''mask_labels''']]
lowercase_ = [el.to(UpperCAmelCase__ ) for el in inputs['''class_labels''']]
with torch.no_grad():
lowercase_ = model(**UpperCAmelCase__ )
self.assertTrue(outputs.loss is not None )
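# Minimal standalone inference sketch mirroring the integration tests above: a
# hedged example, reusing the class names imported at the top of this file and
# assuming hub access to `facebook/mask2former-swin-small-coco-instance` plus
# the local COCO fixture image.
if __name__ == "__main__":
    import torch
    from PIL import Image
    from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerImageProcessor
    checkpoint = "facebook/mask2former-swin-small-coco-instance"
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    processor = MaskaFormerImageProcessor.from_pretrained(checkpoint)
    model = MaskaFormerForUniversalSegmentation.from_pretrained(checkpoint).eval()
    inputs = processor(image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    print(outputs.class_queries_logits.shape)  # (1, num_queries, num_labels + 1)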
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionDiffEditPipeline
__SCREAMING_SNAKE_CASE : Optional[Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'height', 'width', 'image'} | {'image_latents'}
__SCREAMING_SNAKE_CASE : List[str] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'image'} | {'image_latents'}
__SCREAMING_SNAKE_CASE : int = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__SCREAMING_SNAKE_CASE : Any = frozenset([] )
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=UpperCamelCase__ , )
lowercase_ = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=UpperCamelCase__ , set_alpha_to_one=UpperCamelCase__ , )
lowercase_ = DDIMInverseScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=UpperCamelCase__ , set_alpha_to_zero=UpperCamelCase__ , )
torch.manual_seed(0 )
lowercase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowercase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="""gelu""" , projection_dim=512 , )
lowercase_ = CLIPTextModel(UpperCamelCase__ )
lowercase_ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowercase_ = {
"""unet""": unet,
"""scheduler""": scheduler,
"""inverse_scheduler""": inverse_scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def UpperCAmelCase__ ( self : int , UpperCamelCase__ : Any , UpperCamelCase__ : Any=0 ):
'''simple docstring'''
lowercase_ = floats_tensor((1, 16, 16) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
lowercase_ = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
if str(UpperCamelCase__ ).startswith("""mps""" ):
lowercase_ = torch.manual_seed(UpperCamelCase__ )
else:
lowercase_ = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
lowercase_ = {
"""prompt""": """a dog and a newt""",
"""mask_image""": mask,
"""image_latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 2,
"""inpaint_strength""": 1.0,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def UpperCAmelCase__ ( self : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : str=0 ):
'''simple docstring'''
lowercase_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
lowercase_ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowercase_ = Image.fromarray(np.uinta(UpperCamelCase__ ) ).convert("""RGB""" )
if str(UpperCamelCase__ ).startswith("""mps""" ):
lowercase_ = torch.manual_seed(UpperCamelCase__ )
else:
lowercase_ = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
lowercase_ = {
"""image""": image,
"""source_prompt""": """a cat and a frog""",
"""target_prompt""": """a dog and a newt""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""num_maps_per_mask""": 2,
"""mask_encode_strength""": 1.0,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def UpperCAmelCase__ ( self : List[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple=0 ):
'''simple docstring'''
lowercase_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
lowercase_ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowercase_ = Image.fromarray(np.uinta(UpperCamelCase__ ) ).convert("""RGB""" )
if str(UpperCamelCase__ ).startswith("""mps""" ):
lowercase_ = torch.manual_seed(UpperCamelCase__ )
else:
lowercase_ = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
lowercase_ = {
"""image""": image,
"""prompt""": """a cat and a frog""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""inpaint_strength""": 1.0,
"""guidance_scale""": 6.0,
"""decode_latents""": True,
"""output_type""": """numpy""",
}
return inputs
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
if not hasattr(self.pipeline_class , """_optional_components""" ):
return
lowercase_ = self.get_dummy_components()
lowercase_ = self.pipeline_class(**UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
lowercase_ = self.get_dummy_inputs(UpperCamelCase__ )
lowercase_ = pipe(**UpperCamelCase__ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(UpperCamelCase__ )
lowercase_ = self.pipeline_class.from_pretrained(UpperCamelCase__ )
pipe_loaded.to(UpperCamelCase__ )
pipe_loaded.set_progress_bar_config(disable=UpperCamelCase__ )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(UpperCamelCase__ , UpperCamelCase__ ) is None , F'''`{optional_component}` did not stay set to None after loading.''' , )
lowercase_ = self.get_dummy_inputs(UpperCamelCase__ )
lowercase_ = pipe_loaded(**UpperCamelCase__ )[0]
lowercase_ = np.abs(output - output_loaded ).max()
self.assertLess(UpperCamelCase__ , 1e-4 )
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ = """cpu"""
lowercase_ = self.get_dummy_components()
lowercase_ = self.pipeline_class(**UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowercase_ = self.get_dummy_mask_inputs(UpperCamelCase__ )
lowercase_ = pipe.generate_mask(**UpperCamelCase__ )
lowercase_ = mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16) )
lowercase_ = np.array([0] * 9 )
lowercase_ = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCamelCase__ , 1e-3 )
self.assertEqual(mask[0, -3, -4] , 0 )
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
lowercase_ = """cpu"""
lowercase_ = self.get_dummy_components()
lowercase_ = self.pipeline_class(**UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowercase_ = self.get_dummy_inversion_inputs(UpperCamelCase__ )
lowercase_ = pipe.invert(**UpperCamelCase__ ).images
lowercase_ = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
lowercase_ = np.array(
[0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , )
lowercase_ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCamelCase__ , 1e-3 )
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=5e-3 )
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ = """cpu"""
lowercase_ = self.get_dummy_components()
lowercase_ = {"""beta_start""": 0.00_085, """beta_end""": 0.012, """beta_schedule""": """scaled_linear"""}
components["""scheduler"""] = DPMSolverMultistepScheduler(**scheduler_args )
components["""inverse_scheduler"""] = DPMSolverMultistepInverseScheduler(**scheduler_args )
lowercase_ = self.pipeline_class(**UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowercase_ = self.get_dummy_inversion_inputs(UpperCamelCase__ )
lowercase_ = pipe.invert(**UpperCamelCase__ ).images
lowercase_ = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
lowercase_ = np.array(
[0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , )
lowercase_ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCamelCase__ , 1e-3 )
@require_torch_gpu
@slow
class UpperCamelCase__ ( unittest.TestCase ):
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def UpperCAmelCase__ ( cls : Dict ):
'''simple docstring'''
lowercase_ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png""" )
lowercase_ = raw_image.convert("""RGB""" ).resize((768, 768) )
cls.raw_image = raw_image
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
lowercase_ = torch.manual_seed(0 )
lowercase_ = StableDiffusionDiffEditPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2-1""" , safety_checker=UpperCamelCase__ , torch_dtype=torch.floataa )
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowercase_ = """a bowl of fruit"""
lowercase_ = """a bowl of pears"""
lowercase_ = pipe.generate_mask(
image=self.raw_image , source_prompt=UpperCamelCase__ , target_prompt=UpperCamelCase__ , generator=UpperCamelCase__ , )
lowercase_ = pipe.invert(
prompt=UpperCamelCase__ , image=self.raw_image , inpaint_strength=0.7 , generator=UpperCamelCase__ ).latents
lowercase_ = pipe(
prompt=UpperCamelCase__ , mask_image=UpperCamelCase__ , image_latents=UpperCamelCase__ , generator=UpperCamelCase__ , negative_prompt=UpperCamelCase__ , inpaint_strength=0.7 , output_type="""numpy""" , ).images[0]
lowercase_ = (
np.array(
load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/diffedit/pears.png""" ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5e-1
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
lowercase_ = torch.manual_seed(0 )
lowercase_ = StableDiffusionDiffEditPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2-1""" , safety_checker=UpperCamelCase__ , torch_dtype=torch.floataa )
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.inverse_scheduler = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowercase_ = """a bowl of fruit"""
lowercase_ = """a bowl of pears"""
lowercase_ = pipe.generate_mask(
image=self.raw_image , source_prompt=UpperCamelCase__ , target_prompt=UpperCamelCase__ , generator=UpperCamelCase__ , )
lowercase_ = pipe.invert(
prompt=UpperCamelCase__ , image=self.raw_image , inpaint_strength=0.7 , generator=UpperCamelCase__ , num_inference_steps=25 , ).latents
lowercase_ = pipe(
prompt=UpperCamelCase__ , mask_image=UpperCamelCase__ , image_latents=UpperCamelCase__ , generator=UpperCamelCase__ , negative_prompt=UpperCamelCase__ , inpaint_strength=0.7 , num_inference_steps=25 , output_type="""numpy""" , ).images[0]
lowercase_ = (
np.array(
load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/diffedit/pears.png""" ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5e-1
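# A compact, hedged sketch of the three-stage DiffEdit flow the slow tests above
# exercise; the checkpoint, prompts, and the `init_image` argument are illustrative
# assumptions, not part of the test suite.
import torch
from diffusers import StableDiffusionDiffEditPipeline

def diffedit_sketch(init_image):
    pipe = StableDiffusionDiffEditPipeline.from_pretrained(
        "stabilityai/stable-diffusion-2-1", torch_dtype=torch.float16
    )
    pipe.enable_model_cpu_offload()
    # 1) mask from the disagreement of noise estimates under source vs. target prompts
    mask = pipe.generate_mask(
        image=init_image, source_prompt="a bowl of fruit", target_prompt="a bowl of pears"
    )
    # 2) DDIM inversion to partially noised latents; content outside the mask survives
    latents = pipe.invert(prompt="a bowl of fruit", image=init_image, inpaint_strength=0.7).latents
    # 3) denoise inside the mask only, guided by the target prompt
    return pipe(
        prompt="a bowl of pears", mask_image=mask, image_latents=latents, inpaint_strength=0.7
    ).images[0]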
| 650
| 0
|
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
a = {
'facebook/mask2former-swin-small-coco-instance': (
'https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json'
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
a = logging.get_logger(__name__)
class UpperCamelCase__ ( PretrainedConfig ):
__SCREAMING_SNAKE_CASE : Dict = "mask2former"
__SCREAMING_SNAKE_CASE : Union[str, Any] = ["swin"]
__SCREAMING_SNAKE_CASE : Optional[Any] = {"hidden_size": "hidden_dim"}
def __init__( self : Tuple , UpperCamelCase__ : Optional[Dict] = None , UpperCamelCase__ : int = 256 , UpperCamelCase__ : int = 256 , UpperCamelCase__ : int = 256 , UpperCamelCase__ : int = 1_024 , UpperCamelCase__ : str = "relu" , UpperCamelCase__ : int = 6 , UpperCamelCase__ : int = 10 , UpperCamelCase__ : int = 8 , UpperCamelCase__ : float = 0.0 , UpperCamelCase__ : int = 2_048 , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : int = 4 , UpperCamelCase__ : int = 255 , UpperCamelCase__ : int = 100 , UpperCamelCase__ : float = 0.1 , UpperCamelCase__ : float = 2.0 , UpperCamelCase__ : float = 5.0 , UpperCamelCase__ : float = 5.0 , UpperCamelCase__ : int = 12_544 , UpperCamelCase__ : float = 3.0 , UpperCamelCase__ : float = 0.75 , UpperCamelCase__ : float = 0.02 , UpperCamelCase__ : float = 1.0 , UpperCamelCase__ : bool = True , UpperCamelCase__ : List[int] = [4, 8, 16, 32] , UpperCamelCase__ : bool = None , **UpperCamelCase__ : Union[str, Any] , ):
'''simple docstring'''
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.""" )
lowercase_ = CONFIG_MAPPING["""swin"""](
image_size=224 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=False , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] , )
if isinstance(backbone_config , dict ):
lowercase_ = backbone_config.pop("""model_type""" )
lowercase_ = CONFIG_MAPPING[backbone_model_type]
lowercase_ = config_class.from_dict(backbone_config )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
F'''Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. '''
F'''Supported model types: {",".join(self.backbones_supported )}''' )
lowercase_ = backbone_config
lowercase_ = feature_size
lowercase_ = mask_feature_size
lowercase_ = hidden_dim
lowercase_ = encoder_feedforward_dim
lowercase_ = activation_function
lowercase_ = encoder_layers
lowercase_ = decoder_layers
lowercase_ = num_attention_heads
lowercase_ = dropout
lowercase_ = dim_feedforward
lowercase_ = pre_norm
lowercase_ = enforce_input_projection
lowercase_ = common_stride
lowercase_ = ignore_value
lowercase_ = num_queries
lowercase_ = no_object_weight
lowercase_ = class_weight
lowercase_ = mask_weight
lowercase_ = dice_weight
lowercase_ = train_num_points
lowercase_ = oversample_ratio
lowercase_ = importance_sample_ratio
lowercase_ = init_std
lowercase_ = init_xavier_std
lowercase_ = use_auxiliary_loss
lowercase_ = feature_strides
lowercase_ = output_auxiliary_logits
lowercase_ = decoder_layers
super().__init__(**kwargs )
@classmethod
def UpperCAmelCase__ ( cls : Dict , UpperCamelCase__ : PretrainedConfig , **UpperCamelCase__ : Tuple ):
'''simple docstring'''
return cls(
backbone_config=kwargs.pop("""backbone_config""" , UpperCamelCase__ ) if False else UpperCamelCase__ , **UpperCamelCase__ , ) if False else cls(backbone_config=backbone_config , **kwargs , )
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ = copy.deepcopy(self.__dict__ )
lowercase_ = self.backbone_config.to_dict()
lowercase_ = self.__class__.model_type
return output
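# For orientation, a hedged usage sketch against the upstream class this snippet
# appears to mirror (`transformers.Mask2FormerConfig`, inferred from the
# "mask2former" model_type above); the chosen sizes are arbitrary.
from transformers import Mask2FormerConfig, SwinConfig

backbone = SwinConfig(depths=[2, 2, 18, 2], num_heads=[3, 6, 12, 24])
cfg = Mask2FormerConfig(backbone_config=backbone, num_queries=100)
cfg_dict = cfg.to_dict()  # the nested backbone config is serialized as a plain dict
assert cfg_dict["backbone_config"]["model_type"] == "swin"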
| 701
|
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
a = logging.get_logger(__name__)
class UpperCamelCase__ ( BaseImageProcessor ):
__SCREAMING_SNAKE_CASE : str = ['pixel_values']
def __init__( self : List[Any] , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[int, float] = 1 / 255 , UpperCamelCase__ : bool = True , UpperCamelCase__ : int = 8 , **UpperCamelCase__ : Optional[Any] , ):
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
lowercase_ = do_rescale
lowercase_ = rescale_factor
lowercase_ = do_pad
lowercase_ = pad_size
def UpperCAmelCase__ ( self : int , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : float , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
return rescale(UpperCamelCase__ , scale=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def UpperCAmelCase__ ( self : List[Any] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : int , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None ):
'''simple docstring'''
lowercase_ , lowercase_ = get_image_size(UpperCamelCase__ )
lowercase_ = (old_height // size + 1) * size - old_height
lowercase_ = (old_width // size + 1) * size - old_width
return pad(UpperCamelCase__ , ((0, pad_height), (0, pad_width)) , mode="""symmetric""" , data_format=UpperCamelCase__ )
def UpperCAmelCase__ ( self : Optional[int] , UpperCamelCase__ : ImageInput , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[float] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase__ : Dict , ):
'''simple docstring'''
lowercase_ = do_rescale if do_rescale is not None else self.do_rescale
lowercase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase_ = do_pad if do_pad is not None else self.do_pad
lowercase_ = pad_size if pad_size is not None else self.pad_size
lowercase_ = make_list_of_images(UpperCamelCase__ )
if not valid_images(UpperCamelCase__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
# All transformations expect numpy arrays.
lowercase_ = [to_numpy_array(UpperCamelCase__ ) for image in images]
if do_rescale:
lowercase_ = [self.rescale(image=UpperCamelCase__ , scale=UpperCamelCase__ ) for image in images]
if do_pad:
lowercase_ = [self.pad(UpperCamelCase__ , size=UpperCamelCase__ ) for image in images]
lowercase_ = [to_channel_dimension_format(UpperCamelCase__ , UpperCamelCase__ ) for image in images]
lowercase_ = {"""pixel_values""": images}
return BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ )
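# A quick numeric check of the padding rule implemented above: height and width
# are padded symmetrically up to the next multiple of `size`, and an exact
# multiple still gains one full extra block, per the
# `(old // size + 1) * size - old` arithmetic. Names here are illustrative.
def pad_amount(old: int, size: int) -> int:
    return (old // size + 1) * size - old

assert pad_amount(30, 8) == 2   # 30 -> 32
assert pad_amount(32, 8) == 8   # already a multiple, still padded to 40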
| 650
| 0
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
BigBirdTokenizer = None
a = logging.get_logger(__name__)
a = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
a = {
'vocab_file': {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
),
},
'tokenizer_file': {
'google/bigbird-roberta-base': (
'https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json'
),
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json'
),
},
}
a = {
'google/bigbird-roberta-base': 4_0_9_6,
'google/bigbird-roberta-large': 4_0_9_6,
'google/bigbird-base-trivia-itc': 4_0_9_6,
}
a = '▁'
class UpperCamelCase__ ( PreTrainedTokenizerFast ):
__SCREAMING_SNAKE_CASE : Optional[int] = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE : Dict = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE : str = BigBirdTokenizer
__SCREAMING_SNAKE_CASE : Any = ['input_ids', 'attention_mask']
__SCREAMING_SNAKE_CASE : Optional[Any] = []
def __init__( self : List[Any] , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : Tuple=None , UpperCamelCase__ : Any="<unk>" , UpperCamelCase__ : Optional[Any]="<s>" , UpperCamelCase__ : str="</s>" , UpperCamelCase__ : Optional[Any]="<pad>" , UpperCamelCase__ : Union[str, Any]="[SEP]" , UpperCamelCase__ : Tuple="[MASK]" , UpperCamelCase__ : List[str]="[CLS]" , **UpperCamelCase__ : Union[str, Any] , ):
'''simple docstring'''
lowercase_ = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
lowercase_ = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
lowercase_ = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
lowercase_ = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
lowercase_ = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
lowercase_ = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
# Mask token behaves like a normal word, i.e. includes the space before it
lowercase_ = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
super().__init__(
vocab_file , tokenizer_file=tokenizer_file , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , **kwargs , )
self.vocab_file = vocab_file
self.can_save_slow_tokenizer = False if not self.vocab_file else True
def UpperCAmelCase__ ( self : Dict , UpperCamelCase__ : str , UpperCamelCase__ : Optional[Any] = None ):
'''simple docstring'''
lowercase_ = [self.sep_token_id]
lowercase_ = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCAmelCase__ ( self : Any , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Union[str, Any] = None , UpperCamelCase__ : Optional[Any] = False ):
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"""You should not supply a second sequence if the provided sequence of """
"""ids is already formatted with special tokens for the model.""" )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is None:
return [1] + ([0] * len(token_ids_a )) + [1]
return [1] + ([0] * len(token_ids_a )) + [1] + ([0] * len(token_ids_a )) + [1]
def UpperCAmelCase__ ( self : Tuple , UpperCamelCase__ : List[Any] , UpperCamelCase__ : str = None ):
'''simple docstring'''
lowercase_ = [self.sep_token_id]
lowercase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase__ ( self : Dict , UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(save_directory ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowercase_ = os.path.join(
save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
copyfile(self.vocab_file , out_vocab_file )
return (out_vocab_file,)
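# A toy illustration of the special-token layouts the methods above build,
# with made-up ids standing in for the real [CLS]/[SEP] vocabulary entries:
#   single sequence: [CLS] A [SEP]          pair: [CLS] A [SEP] B [SEP]
cls_id, sep_id = 65, 66  # illustrative values only
seq_a, seq_b = [10, 11], [20]
single = [cls_id] + seq_a + [sep_id]
pair = [cls_id] + seq_a + [sep_id] + seq_b + [sep_id]
assert len(single) == len(seq_a) + 2
assert len(pair) == len(seq_a) + len(seq_b) + 3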
| 702
|
def UpperCAmelCase_ ( UpperCAmelCase__ ):
if not isinstance(UpperCAmelCase__ , int ):
raise TypeError("""Input value must be an 'int' type""" )
lowercase_ = 0
while number:
position += 1
number >>= 1
return position
if __name__ == "__main__":
import doctest
doctest.testmod()
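# Sanity check for the helper above: for non-negative integers it matches
# Python's built-in bit_length(), i.e. the 1-indexed position of the most
# significant set bit (0 for an input of 0).
for n in (0, 1, 2, 5, 17, 1 << 20):
    assert UpperCAmelCase_(n) == n.bit_length()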
| 650
| 0
|
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class UpperCamelCase__ ( unittest.TestCase ):
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
lowercase_ = tf.convert_to_tensor(
[
[
8.2_220_991, # 3rd highest value; idx. 0
-0.5_620_044,
5.23_229_752,
4.0_386_393,
-6.8_798_378,
-0.54_785_802,
-3.2_012_153,
2.92_777_176,
1.88_171_953,
7.35_341_276, # 5th highest value; idx. 9
8.43_207_833, # 2nd highest value; idx. 10
-9.85_711_836,
-5.96_209_236,
-1.13_039_161,
-7.1_115_294,
-0.8_369_633,
-5.3_186_408,
7.06_427_407,
0.81_369_344,
-0.82_023_817,
-5.9_179_796,
0.58_813_443,
-6.99_778_438,
4.71_551_189,
-0.18_771_637,
7.44_020_759, # 4th highest value; idx. 25
9.38_450_987, # 1st highest value; idx. 26
2.12_662_941,
-9.32_562_038,
2.35_652_522,
], # cumulative prob of 5 highest values <= 0.6
[
0.58_425_518,
4.53_139_238,
-5.57_510_464,
-6.28_030_699,
-7.19_529_503,
-4.02_122_551,
1.39_337_037,
-6.06_707_057,
1.59_480_517,
-9.643_119,
0.03_907_799,
0.67_231_762,
-8.88_206_726,
6.27_115_922, # 4th highest value; idx. 13
2.28_520_723,
4.82_767_506,
4.30_421_368,
8.8_275_313, # 2nd highest value; idx. 17
5.44_029_958, # 5th highest value; idx. 18
-4.4_735_794,
7.38_579_536, # 3rd highest value; idx. 20
-2.91_051_663,
2.61_946_077,
-2.5_674_762,
-9.48_959_302,
-4.02_922_645,
-1.35_416_918,
9.67_702_323, # 1st highest value; idx. 27
-5.89_478_553,
1.85_370_467,
], # cumulative prob of 5 highest values <= 0.6
] , dtype=tf.float32 , )
lowercase_ = tf.convert_to_tensor(
[[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.int32 , ) # expected non filtered idx as noted above
lowercase_ = tf.convert_to_tensor(
[8.222_099, 7.3_534_126, 8.432_078, 7.4_402_075, 9.38_451, 6.271_159, 8.827_531, 5.4_402_995, 7.3_857_956, 9.677_023] , dtype=tf.float32 , ) # expected non filtered values as noted above
lowercase_ = tf_top_k_top_p_filtering(logits , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 )
lowercase_ = output[output != -float("""inf""" )]
lowercase_ = tf.cast(
tf.where(tf.not_equal(output , tf.constant(-float("""inf""" ) , dtype=tf.float32 ) ) ) , dtype=tf.int32 , )
tf.debugging.assert_near(non_inf_output , non_inf_expected_output , rtol=1e-12 )
tf.debugging.assert_equal(non_inf_idx , non_inf_expected_idx )
@require_tf
class UpperCamelCase__ ( unittest.TestCase , GenerationIntegrationTestsMixin ):
# setting framework_dependent_parameters needs to be gated, just like its contents' imports
if is_tf_available():
__SCREAMING_SNAKE_CASE : Optional[Any] = {
'AutoModelForCausalLM': TFAutoModelForCausalLM,
'AutoModelForSpeechSeq2Seq': TFAutoModelForSpeechSeqaSeq,
'AutoModelForSeq2SeqLM': TFAutoModelForSeqaSeqLM,
'AutoModelForVision2Seq': TFAutoModelForVisionaSeq,
'LogitsProcessorList': TFLogitsProcessorList,
'MinLengthLogitsProcessor': TFMinLengthLogitsProcessor,
'create_tensor_fn': tf.convert_to_tensor,
'floats_tensor': floats_tensor,
'return_tensors': 'tf',
}
@slow
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
lowercase_ = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
lowercase_ = 2
lowercase_ = 2
class DummyModel( tf.Module ):
def __init__( self : int , model : str ):
'''simple docstring'''
super(DummyModel , self ).__init__()
self.model = model
@tf.function(
input_signature=(
tf.TensorSpec((None, input_length) , tf.int32 , name="""input_ids""" ),
tf.TensorSpec((None, input_length) , tf.int32 , name="""attention_mask""" ),
) , jit_compile=True , )
def UpperCAmelCase__ ( self : Optional[int] , input_ids : Tuple , attention_mask : int ):
'''simple docstring'''
lowercase_ = self.model.generate(
input_ids=input_ids , attention_mask=attention_mask , max_new_tokens=max_new_tokens , return_dict_in_generate=True , )
return {"sequences": outputs["sequences"]}
lowercase_ = [[2, 0], [102, 103]]
lowercase_ = [[1, 0], [1, 1]]
lowercase_ = DummyModel(model=test_model )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(dummy_model , tmp_dir , signatures={"""serving_default""": dummy_model.serving} )
lowercase_ = tf.saved_model.load(tmp_dir ).signatures["""serving_default"""]
for batch_size in range(1 , len(dummy_input_ids ) + 1 ):
lowercase_ = {
"""input_ids""": tf.constant(dummy_input_ids[:batch_size] ),
"""attention_mask""": tf.constant(dummy_attention_masks[:batch_size] ),
}
lowercase_ = serving_func(**inputs )["""sequences"""]
lowercase_ = test_model.generate(**inputs , max_new_tokens=max_new_tokens )
tf.debugging.assert_equal(tf_func_outputs , tf_model_outputs )
@slow
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
lowercase_ = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
lowercase_ = 1
lowercase_ = 2
class DummyModel( tf.Module ):
def __init__( self : Any , model : Any ):
'''simple docstring'''
super(DummyModel , self ).__init__()
self.model = model
@tf.function(
input_signature=(
tf.TensorSpec((batch_size, None) , tf.int32 , name="""input_ids""" ),
tf.TensorSpec((batch_size, None) , tf.int32 , name="""attention_mask""" ),
) , jit_compile=True , )
def UpperCAmelCase__ ( self : Optional[Any] , input_ids : int , attention_mask : int ):
'''simple docstring'''
lowercase_ = self.model.generate(
input_ids=input_ids , attention_mask=attention_mask , max_new_tokens=max_new_tokens , return_dict_in_generate=True , )
return {"sequences": outputs["sequences"]}
lowercase_ = [[2], [102, 103]]
lowercase_ = [[1], [1, 1]]
lowercase_ = DummyModel(model=test_model )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(dummy_model , tmp_dir , signatures={"""serving_default""": dummy_model.serving} )
lowercase_ = tf.saved_model.load(tmp_dir ).signatures["""serving_default"""]
for input_row in range(len(dummy_input_ids ) ):
lowercase_ = {
"""input_ids""": tf.constant([dummy_input_ids[input_row]] ),
"""attention_mask""": tf.constant([dummy_attention_masks[input_row]] ),
}
lowercase_ = serving_func(**inputs )["""sequences"""]
lowercase_ = test_model.generate(**inputs , max_new_tokens=max_new_tokens )
tf.debugging.assert_equal(tf_func_outputs , tf_model_outputs )
@slow
@require_tensorflow_text
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
# file needed to load the TF tokenizer
hf_hub_download(repo_id="""google/flan-t5-small""" , filename="""spiece.model""" , local_dir=tmp_dir )
class CompleteSentenceTransformer( tf.keras.layers.Layer ):
def __init__( self : str ):
'''simple docstring'''
super().__init__()
lowercase_ = text.SentencepieceTokenizer(
model=tf.io.gfile.GFile(os.path.join(tmp_dir , """spiece.model""" ) , """rb""" ).read() )
lowercase_ = TFAutoModelForSeqaSeqLM.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
def UpperCAmelCase__ ( self : List[Any] , inputs : Optional[Any] , *args : str , **kwargs : Tuple ):
'''simple docstring'''
lowercase_ = self.tokenizer.tokenize(inputs )
lowercase_ , lowercase_ = text.pad_model_inputs(
tokens , max_seq_length=64 , pad_value=self.model.config.pad_token_id )
lowercase_ = self.model.generate(input_ids=input_ids , attention_mask=attention_mask )
return self.tokenizer.detokenize(outputs )
lowercase_ = CompleteSentenceTransformer()
lowercase_ = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name="""inputs""" )
lowercase_ = complete_model(inputs )
lowercase_ = tf.keras.Model(inputs , outputs )
keras_model.save(tmp_dir )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
lowercase_ = {
"""do_sample""": True,
"""num_beams""": 1,
"""top_p""": 0.7,
"""top_k""": 10,
"""temperature""": 0.7,
}
lowercase_ = 14
lowercase_ = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
lowercase_ = """Hello, my dog is cute and"""
lowercase_ = tokenizer(sentence , return_tensors="""tf""" )
lowercase_ = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
lowercase_ = 638
# forces the generation to happen on CPU, to avoid GPU-related quirks
with tf.device(""":/CPU:0""" ):
tf.random.set_seed(0 )
lowercase_ = model.generate(**tokens , eos_token_id=eos_token_id , **generation_kwargs )
self.assertTrue(expectation == len(generated_tokens[0] ) )
lowercase_ = [638, 198]
with tf.device(""":/CPU:0""" ):
tf.random.set_seed(0 )
lowercase_ = model.generate(**tokens , eos_token_id=eos_token_id , **generation_kwargs )
self.assertTrue(expectation == len(generated_tokens[0] ) )
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
lowercase_ = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bart""" )
lowercase_ = """Hugging Face is a technology company based in New York and Paris."""
lowercase_ = bart_tokenizer(article , return_tensors="""tf""" ).input_ids
lowercase_ = TFBartForConditionalGeneration.from_pretrained("""hf-internal-testing/tiny-random-bart""" )
lowercase_ = bart_model.generate(input_ids ).numpy()
class FakeBart( TFBartForConditionalGeneration ):
def UpperCAmelCase__ ( self : Optional[Any] , input_ids : Optional[Any] , foo : Union[str, Any]=None , **kwargs : Optional[int] ):
'''simple docstring'''
return super().call(input_ids , **kwargs )
lowercase_ = FakeBart.from_pretrained("""hf-internal-testing/tiny-random-bart""" )
lowercase_ = bart_model.generate(input_ids , foo="""bar""" ).numpy()
self.assertTrue(np.array_equal(output , output_foo ) )
class FakeEncoder( bart_model.model.encoder.__class__ ):
def UpperCAmelCase__ ( self : List[str] , input_ids : Union[str, Any] , **kwargs : Union[str, Any] ):
'''simple docstring'''
return super().call(input_ids , **kwargs )
lowercase_ = FakeEncoder(bart_model.config , bart_model.model.shared )
bart_model.model.encoder = fake_encoder
# Normal generation still works (the output will be different because the encoder weights are different)
lowercase_ = bart_model.generate(input_ids ).numpy()
with self.assertRaises(ValueError ):
# FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
bart_model.generate(input_ids , foo="""bar""" )
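# For reference, a hedged standalone sketch of the filtering behaviour checked in
# the first test above: keep the top-k logits, then keep the shortest prefix of
# the sorted distribution whose cumulative probability stays within top_p (always
# at least one token), masking everything else with -inf. This simplified variant
# omits `min_tokens_to_keep` and may differ from the library's exact boundary
# handling; it is illustrative, not the library code.
import tensorflow as tf

def top_k_top_p_sketch(logits: tf.Tensor, top_k: int, top_p: float) -> tf.Tensor:
    if top_k > 0:
        # drop everything strictly below the k-th largest logit
        kth_largest = tf.math.top_k(logits, k=top_k).values[..., -1:]
        logits = tf.where(logits < kth_largest, -float("inf"), logits)
    # nucleus: sort, accumulate probabilities, find the cutoff logit
    sorted_logits = tf.sort(logits, direction="DESCENDING", axis=-1)
    cumulative = tf.cumsum(tf.nn.softmax(sorted_logits, axis=-1), axis=-1)
    kept = tf.where(cumulative <= top_p, sorted_logits, sorted_logits[..., :1])
    cutoff = tf.reduce_min(kept, axis=-1, keepdims=True)
    return tf.where(logits < cutoff, -float("inf"), logits)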
| 703
|
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UpperCamelCase__ ( ModelMixin , ConfigMixin , ModuleUtilsMixin ):
@register_to_config
def __init__( self : int , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : float , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : bool = False , ):
'''simple docstring'''
super().__init__()
lowercase_ = nn.Embedding(UpperCamelCase__ , UpperCamelCase__ )
lowercase_ = nn.Embedding(UpperCamelCase__ , UpperCamelCase__ )
lowercase_ = False
lowercase_ = nn.Dropout(p=UpperCamelCase__ )
lowercase_ = TaConfig(
vocab_size=UpperCamelCase__ , d_model=UpperCamelCase__ , num_heads=UpperCamelCase__ , d_kv=UpperCamelCase__ , d_ff=UpperCamelCase__ , dropout_rate=UpperCamelCase__ , feed_forward_proj=UpperCamelCase__ , is_decoder=UpperCamelCase__ , is_encoder_decoder=UpperCamelCase__ , )
lowercase_ = nn.ModuleList()
for lyr_num in range(UpperCamelCase__ ):
lowercase_ = TaBlock(UpperCamelCase__ )
self.encoders.append(UpperCamelCase__ )
lowercase_ = TaLayerNorm(UpperCamelCase__ )
lowercase_ = nn.Dropout(p=UpperCamelCase__ )
def UpperCAmelCase__ ( self : Optional[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : str ):
'''simple docstring'''
lowercase_ = self.token_embedder(UpperCamelCase__ )
lowercase_ = encoder_input_tokens.shape[1]
lowercase_ = torch.arange(UpperCamelCase__ , device=encoder_input_tokens.device )
x += self.position_encoding(UpperCamelCase__ )
lowercase_ = self.dropout_pre(UpperCamelCase__ )
# invert the attention mask
lowercase_ = encoder_input_tokens.size()
lowercase_ = self.get_extended_attention_mask(UpperCamelCase__ , UpperCamelCase__ )
for lyr in self.encoders:
lowercase_ = lyr(UpperCamelCase__ , UpperCamelCase__ )[0]
lowercase_ = self.layer_norm(UpperCamelCase__ )
return self.dropout_post(UpperCamelCase__ ), encoder_inputs_mask
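# To make the forward contract above concrete, a hedged mini version built from
# the same blocks under their real upstream names
# (transformers.models.t5.modeling_t5); all sizes here are arbitrary assumptions.
import torch
import torch.nn as nn
from transformers.models.t5.modeling_t5 import T5Block, T5Config

config = T5Config(
    vocab_size=128, d_model=32, num_heads=2, d_kv=16, d_ff=64,
    dropout_rate=0.1, feed_forward_proj="gated-gelu",
    is_decoder=False, is_encoder_decoder=False,
)
token_embedder = nn.Embedding(128, 32)
position_encoding = nn.Embedding(64, 32)
block = T5Block(config)
tokens = torch.randint(0, 128, (1, 10))
x = token_embedder(tokens) + position_encoding(torch.arange(10))
# extended additive mask: zeros keep every position, large negatives would mask
extended_mask = torch.zeros(1, 1, 1, 10)
hidden = block(x, attention_mask=extended_mask)[0]
print(hidden.shape)  # torch.Size([1, 10, 32])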
| 650
| 0
|