from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
    import PIL

logger = logging.get_logger(__name__)


class ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PIL.Image.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        rescale_factor: Union[int, float] = 1 / 255,
        do_rescale: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PIL.Image.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample=None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
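# A minimal usage sketch (hypothetical, not part of the original file); the
# class name is the generic restoration used above and the random HWC image
# is illustrative.
if __name__ == "__main__":
    processor = ImageProcessor()
    dummy_image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)  # HWC uint8 image
    batch = processor.preprocess(dummy_image, return_tensors="np")
    # resized to 256x256, center-cropped to 224x224, channels-first:
    print(batch["pixel_values"].shape)  # (1, 3, 224, 224)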
# ======================================================================
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be run first on your machine"


def get_user_input():
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config


def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=config_command)

    return parser


def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")


def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)


if __name__ == "__main__":
    main()
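# A quick sketch of driving the parser programmatically (hypothetical usage;
# the parser is defined above, the argument value is illustrative):
#
#     parser = config_command_parser()
#     args = parser.parse_args(["--config_file", "my_config.yaml"])
#     print(args.config_file)  # my_config.yaml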
# ======================================================================
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, "use_cache"):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )

                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )
    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
# ======================================================================
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1

    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])

    # seed value
    table[0] = [[]]  # because empty string has empty combination

    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [[word, *way] for way in table[i]]
                    # adds the word to every combination the current position holds
                    # now, push that combination to the table[i+len(word)]
                    table[i + len(word)] += new_combinations

    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()

    return table[len(target)]
if __name__ == "__main__":
print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
print(
all_construct(
'''hexagonosaurus''',
['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
)
)
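    # One more quick check (illustrative): the result order follows the order
    # in which words appear in word_bank.
    print(all_construct("abc", ["a", "b", "c", "ab"]))  # [['ab', 'c'], ['a', 'b', 'c']]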
# ======================================================================
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/beit-base-patch16-224-pt22k": (
        "https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
    ),
    # See all BEiT models at https://huggingface.co/models?filter=beit
}


class BeitConfig(PretrainedConfig):
    model_type = "beit"

    def __init__(
        self,
        vocab_size=8192,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class BeitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
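# A quick sketch of instantiating the config above (hypothetical usage; the
# overridden values are illustrative, everything else keeps the defaults):
if __name__ == "__main__":
    config = BeitConfig(image_size=384, drop_path_rate=0.2)
    assert config.model_type == "beit"
    assert config.num_hidden_layers == 12  # unchanged default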
# ======================================================================
def or_gate(input_1: int, input_2: int) -> int:
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
# ======================================================================
def abbr(a: str, b: str) -> bool:
    """
    >>> abbr("daBcd", "ABC")
    True
    >>> abbr("dBcd", "ABC")
    False
    """
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
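    # dp[i][j] is True when the first i characters of `a` can be turned into the
    # first j characters of `b` (lowercase letters may be capitalized or dropped,
    # uppercase letters must be matched). Two illustrative checks:
    assert abbr("AbcDE", "ABDE")  # capitalize 'b', drop 'c'
    assert not abbr("AbcXE", "ABDE")  # uppercase 'X' can neither match nor be dropped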
# ======================================================================
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
    "convert_funnel_original_tf_checkpoint_to_pytorch": [],
    "tokenization_funnel": ["FunnelTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_funnel"] = [
        "FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FunnelBaseModel",
        "FunnelForMaskedLM",
        "FunnelForMultipleChoice",
        "FunnelForPreTraining",
        "FunnelForQuestionAnswering",
        "FunnelForSequenceClassification",
        "FunnelForTokenClassification",
        "FunnelModel",
        "FunnelPreTrainedModel",
        "load_tf_weights_in_funnel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_funnel"] = [
        "TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFFunnelBaseModel",
        "TFFunnelForMaskedLM",
        "TFFunnelForMultipleChoice",
        "TFFunnelForPreTraining",
        "TFFunnelForQuestionAnswering",
        "TFFunnelForSequenceClassification",
        "TFFunnelForTokenClassification",
        "TFFunnelModel",
        "TFFunnelPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
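# The _LazyModule above defers the heavy framework imports until an attribute
# is first accessed. A sketch of the effect (assuming this file is packaged as
# transformers/models/funnel/__init__.py):
#
#     from transformers.models import funnel   # cheap: nothing heavy loaded yet
#     config_cls = funnel.FunnelConfig          # first access triggers the real import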
# ======================================================================
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class DownloadTests(unittest.TestCase):
    def test_download_only_pytorch(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            _ = FlaxDiffusionPipeline.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname
            )

            all_root_files = [
                t[-1] for t in os.walk(os.path.join(tmpdirname, os.listdir(tmpdirname)[0], "snapshots"))
            ]
            files = [item for sublist in all_root_files for item in sublist]

            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith(".bin") for f in files)
@slow
@require_flax
class FlaxPipelineTests(unittest.TestCase):
    def test_dummy_all_tpus(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 4

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]

        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 64, 64, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 4.1514745) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 49947.875) < 5e-1

        images_pil = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))

        assert len(images_pil) == num_samples
    def test_stable_diffusion_v1_4(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="flax", safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]

        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.05652401)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2383808.2)) < 5e-1
    def test_stable_diffusion_v1_4_bfloat_16(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]

        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1
    def test_stable_diffusion_v1_4_bfloat_16_with_safety(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]

        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1
    def test_stable_diffusion_v1_4_bfloat_16_ddim(self):
        scheduler = FlaxDDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            scheduler=scheduler,
            safety_checker=None,
        )
        scheduler_state = scheduler.create_state()

        params["scheduler"] = scheduler_state

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]

        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.045043945)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2347693.5)) < 5e-1
    def test_jax_memory_efficient_attention(self):
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prng_seed = jax.random.split(jax.random.PRNGKey(0), num_samples)

        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            safety_checker=None,
        )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        slice = images[2, 0, 256, 10:17, 1]

        # With memory efficient attention
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            safety_checker=None,
            use_memory_efficient_attention=True,
        )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images_eff = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        slice_eff = images_eff[2, 0, 256, 10:17, 1]

        # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
        # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
        assert abs(slice_eff - slice).max() < 1e-2
# ======================================================================
import logging
import os
import threading
import time
try:
    import warnings
except ImportError:
    warnings = None

try:
    import msvcrt
except ImportError:
    msvcrt = None

try:
    import fcntl
except ImportError:
    fcntl = None


# Backward compatibility
# ------------------------------------------------
try:
    TimeoutError
except NameError:
    TimeoutError = OSError


# Data
# ------------------------------------------------
__all__ = [
    "Timeout",
    "BaseFileLock",
    "WindowsFileLock",
    "UnixFileLock",
    "SoftFileLock",
    "FileLock",
]

__version__ = "3.0.12"


_logger = None


def logger():
    """Returns the logger instance used in this module."""
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger
class Timeout(TimeoutError):
    """Raised when the lock could not be acquired in *timeout* seconds."""

    def __init__(self, lock_file):
        # The path of the file lock.
        self.lock_file = lock_file
        return None

    def __str__(self):
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp


class _Acquire_ReturnProxy:
    def __init__(self, lock):
        self.lock = lock
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None
class BaseFileLock:
    """Implements the base class of a file lock."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)

        # The path to the lock file.
        self._lock_file = lock_file

        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None

        # The default timeout value.
        self.timeout = timeout

        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()

        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None

    @property
    def lock_file(self):
        return self._lock_file

    @property
    def timeout(self):
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None
    def _acquire(self):
        """Platform-dependent lock acquisition; implemented by subclasses."""
        raise NotImplementedError()

    def _release(self):
        """Platform-dependent lock release; implemented by subclasses."""
        raise NotImplementedError()

    @property
    def is_locked(self):
        return self._lock_file_fd is not None

    def acquire(self, timeout=None, poll_intervall=0.05):
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout

        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()

                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..."
                    )
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise

        return _Acquire_ReturnProxy(lock=self)
    def release(self, force=False):
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1

                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file

                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")

        return None

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        self.release(force=True)
        return None

    def hash_filename_if_too_long(self, path, max_length):
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path
class WindowsFileLock(BaseFileLock):
    """Uses the msvcrt.locking() function to hard lock the lock file on Windows systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC

        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)

        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class UnixFileLock(BaseFileLock):
    """Uses fcntl.flock() to hard lock the lock file on Unix systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)

        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None
class SoftFileLock(BaseFileLock):
    """Simply watches the existence of the lock file."""

    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None

        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None
FileLock = None
if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock

    if warnings is not None:
        warnings.warn("only soft file lock is available")
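# A minimal usage sketch of the selected FileLock class (hypothetical; the
# file names are illustrative):
if __name__ == "__main__":
    lock = FileLock("high_ground.txt.lock", timeout=5)
    with lock:  # blocks for up to 5 seconds, then raises Timeout
        # only one process at a time gets past this point
        with open("high_ground.txt", "a") as f:
            f.write("You were the chosen one.\n")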
# ======================================================================
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new

        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)

    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
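    # The update in get_distance is the escape-time iteration z_{n+1} = z_n**2 + c
    # written in real arithmetic (a = Re z, b = Im z). An equivalent sketch using
    # Python's complex type (illustrative, not part of the original file):
    def escape_step(c: complex, max_step: int = 50) -> float:
        z = c
        for step in range(max_step):
            z = z * z + c
            if abs(z) > 2:  # |z| > 2 implies divergence; get_distance tests |z|**2 > 4
                break
        return step / (max_step - 1)

    assert abs(escape_step(-0.6 + 0j) - get_distance(-0.6, 0, 50)) < 1e-9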
# ======================================================================
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
logger = logging.get_logger(__name__)

LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}


class LayoutLMv3Config(PretrainedConfig):
    model_type = "layoutlmv3"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_2d_position_embeddings=1024,
        coordinate_size=128,
        shape_size=128,
        has_relative_attention_bias=True,
        rel_pos_bins=32,
        max_rel_pos=128,
        rel_2d_pos_bins=64,
        max_rel_2d_pos=256,
        has_spatial_attention_bias=True,
        text_embed=True,
        visual_embed=True,
        input_size=224,
        num_channels=3,
        patch_size=16,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            intermediate_size=intermediate_size,
            hidden_act=hidden_act,
            hidden_dropout_prob=hidden_dropout_prob,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings,
            type_vocab_size=type_vocab_size,
            initializer_range=initializer_range,
            layer_norm_eps=layer_norm_eps,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout
class lowerCAmelCase ( __a ):
'''simple docstring'''
_A : str = version.parse('''1.12''' )
@property
def lowerCAmelCase ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
else:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels"""}),
] )
@property
def lowerCAmelCase ( self : Union[str, Any] ) -> float:
"""simple docstring"""
return 1E-5
@property
def lowerCAmelCase ( self : str ) -> int:
"""simple docstring"""
return 12
def lowerCAmelCase ( self : List[Any] , __a : "ProcessorMixin" , __a : int = -1 , __a : int = -1 , __a : bool = False , __a : Optional["TensorType"] = None , __a : int = 3 , __a : int = 40 , __a : int = 40 , ) -> Mapping[str, Any]:
"""simple docstring"""
setattr(processor.image_processor , """apply_ocr""" , __a )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__lowercase : Tuple = compute_effective_axis_dimension(
__a , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__lowercase : Tuple = processor.tokenizer.num_special_tokens_to_add(__a )
__lowercase : Tuple = compute_effective_axis_dimension(
__a , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__a )
# Generate dummy inputs according to compute batch and sequence
__lowercase : Union[str, Any] = [[""" """.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
__lowercase : Tuple = [[[48, 84, 73, 128]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
__lowercase : Tuple = self._generate_dummy_images(__a , __a , __a , __a )
__lowercase : int = dict(
processor(
__a , text=__a , boxes=__a , return_tensors=__a , ) )
return inputs
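# Hedged usage sketch for the ONNX config above. The class names below are the
# upstream `transformers` equivalents of the (renamed) classes in this file and
# are an assumption, not part of the original module.
from pathlib import Path
from transformers import LayoutLMv3Config, LayoutLMv3Model, LayoutLMv3Processor
from transformers.models.layoutlmv3.configuration_layoutlmv3 import LayoutLMv3OnnxConfig
from transformers.onnx import export

config = LayoutLMv3Config()
model = LayoutLMv3Model(config)
processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False)
onnx_config = LayoutLMv3OnnxConfig(config, task="question-answering")
# generate_dummy_inputs builds input_ids / attention_mask / bbox / pixel_values
dummy_inputs = onnx_config.generate_dummy_inputs(processor, framework=None)
export(processor, model, onnx_config, onnx_config.default_onnx_opset, Path("layoutlmv3.onnx"))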
def snake_case_ ( lowerCAmelCase_ : int ):
    if n == 1 or not isinstance(lowerCAmelCase_ , int ):
return 0
elif n == 2:
return 1
else:
__lowercase : Optional[Any] = [0, 1]
for i in range(2 , n + 1 ):
sequence.append(sequence[i - 1] + sequence[i - 2] )
return sequence[n]
def snake_case_ ( lowerCAmelCase_ : int ):
__lowercase : Optional[int] = 0
__lowercase : Dict = 2
while digits < n:
index += 1
        __lowercase : str = len(str(fibonacci(index ) ) )
return index
def snake_case_ ( lowerCAmelCase_ : int = 1000 ):
return fibonacci_digits_index(lowerCAmelCase_ )
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
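# Worked example (illustrative, using the pre-renaming helper names): F(12) = 144
# is the first Fibonacci number with 3 digits, so fibonacci_digits_index(3) == 12.
# With the default n = 1000 this reproduces Project Euler #25, whose answer is 4782.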
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
lowerCamelCase : List[Any] = logging.get_logger(__name__)
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : List[str] , __a : str = None , __a : uuid.UUID = None , __a : Any=None , __a : List[Any]=None ) -> List[Any]:
"""simple docstring"""
if not conversation_id:
            __lowercase : Any = uuid.uuid4()
if past_user_inputs is None:
__lowercase : Dict = []
if generated_responses is None:
__lowercase : Dict = []
__lowercase : uuid.UUID = conversation_id
__lowercase : List[str] = past_user_inputs
__lowercase : List[str] = generated_responses
__lowercase : Optional[str] = text
def __eq__( self : Dict , __a : Dict ) -> Any:
"""simple docstring"""
if not isinstance(__a , __a ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def lowerCAmelCase ( self : List[str] , __a : str , __a : bool = False ) -> Dict:
"""simple docstring"""
if self.new_user_input:
if overwrite:
logger.warning(
F"User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten "
F"with: \"{text}\"." )
__lowercase : Optional[int] = text
else:
logger.warning(
F"User input added while unprocessed input was existing: \"{self.new_user_input}\" new input "
F"ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input" )
else:
__lowercase : Dict = text
def lowerCAmelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
__lowercase : Dict = None
def lowerCAmelCase ( self : Optional[int] , __a : str ) -> List[Any]:
"""simple docstring"""
self.generated_responses.append(__a )
def lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self : int ) -> str:
"""simple docstring"""
__lowercase : Optional[int] = F"Conversation id: {self.uuid} \n"
for is_user, text in self.iter_texts():
__lowercase : Optional[Any] = """user""" if is_user else """bot"""
output += F"{name} >> {text} \n"
return output
@add_end_docstrings(
__a , r'''
min_length_for_response (`int`, *optional*, defaults to 32):
The minimum length (in number of tokens) for a response.
minimum_tokens (`int`, *optional*, defaults to 10):
The minimum length of tokens to leave for a response.
''' , )
class lowerCAmelCase ( __a ):
'''simple docstring'''
def __init__( self : Any , *__a : int , **__a : str ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(*__a , **__a )
if self.tokenizer.pad_token_id is None:
__lowercase : List[Any] = self.tokenizer.eos_token
def lowerCAmelCase ( self : Union[str, Any] , __a : int=None , __a : Tuple=None , __a : Any=None , **__a : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowercase : List[str] = {}
__lowercase : Tuple = {}
__lowercase : List[str] = {}
if min_length_for_response is not None:
__lowercase : Dict = min_length_for_response
if minimum_tokens is not None:
__lowercase : Union[str, Any] = minimum_tokens
if "max_length" in generate_kwargs:
__lowercase : Union[str, Any] = generate_kwargs["""max_length"""]
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
__lowercase : Union[str, Any] = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(__a )
return preprocess_params, forward_params, postprocess_params
def __call__( self : Optional[int] , __a : Union[Conversation, List[Conversation]] , __a : Dict=0 , **__a : List[Any] ) -> Tuple:
"""simple docstring"""
__lowercase : List[Any] = super().__call__(__a , num_workers=__a , **__a )
if isinstance(__a , __a ) and len(__a ) == 1:
return outputs[0]
return outputs
def lowerCAmelCase ( self : Union[str, Any] , __a : Conversation , __a : Tuple=32 ) -> Dict[str, Any]:
"""simple docstring"""
if not isinstance(__a , __a ):
raise ValueError("""ConversationalPipeline, expects Conversation as inputs""" )
if conversation.new_user_input is None:
raise ValueError(
F"Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. "
"""Add user inputs with the conversation's `add_user_input` method""" )
if hasattr(self.tokenizer , """_build_conversation_input_ids""" ):
__lowercase : List[Any] = self.tokenizer._build_conversation_input_ids(__a )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
__lowercase : Tuple = self._legacy_parse_and_tokenize(__a )
if self.framework == "pt":
__lowercase : List[Any] = torch.LongTensor([input_ids] )
elif self.framework == "tf":
__lowercase : List[str] = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def lowerCAmelCase ( self : Any , __a : Dict , __a : Any=10 , **__a : Dict ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Optional[int] = generate_kwargs.get("""max_length""" , self.model.config.max_length )
__lowercase : List[Any] = model_inputs["""input_ids"""].shape[1]
if max_length - minimum_tokens < n:
logger.warning(F"Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})" )
__lowercase : Any = max_length - minimum_tokens
__lowercase : int = model_inputs["""input_ids"""][:, -trim:]
if "attention_mask" in model_inputs:
__lowercase : Dict = model_inputs["""attention_mask"""][:, -trim:]
__lowercase : Union[str, Any] = model_inputs.pop("""conversation""" )
__lowercase : Tuple = max_length
__lowercase : int = self.model.generate(**__a , **__a )
if self.model.config.is_encoder_decoder:
__lowercase : Optional[int] = 1
else:
__lowercase : str = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def lowerCAmelCase ( self : int , __a : Tuple , __a : List[Any]=True ) -> List[str]:
"""simple docstring"""
__lowercase : int = model_outputs["""output_ids"""]
__lowercase : Union[str, Any] = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=__a , clean_up_tokenization_spaces=__a , )
__lowercase : List[str] = model_outputs["""conversation"""]
conversation.mark_processed()
conversation.append_response(__a )
return conversation
def lowerCAmelCase ( self : int , __a : Conversation ) -> Dict:
"""simple docstring"""
__lowercase : Optional[int] = self.tokenizer.eos_token_id
__lowercase : Optional[Any] = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(__a , add_special_tokens=__a ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(__a , add_special_tokens=__a ) )
if len(__a ) > self.tokenizer.model_max_length:
__lowercase : List[Any] = input_ids[-self.tokenizer.model_max_length :]
return input_ids
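# Hedged usage sketch for the conversational pipeline above, written against the
# upstream `transformers` API rather than the renamed classes in this file.
from transformers import Conversation, pipeline

chatbot = pipeline("conversational", model="microsoft/DialoGPT-small")
conversation = Conversation("Hi, can you recommend a movie?")
conversation = chatbot(conversation)  # marks the input processed and appends a response
print(conversation.generated_responses[-1])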
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
'''The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion'''
)
lowerCamelCase : Optional[int] = None
lowerCamelCase : str = {
    '''7B''': 11008,
    '''13B''': 13824,
    '''30B''': 17920,
    '''65B''': 22016,
    '''70B''': 28672,
}
lowerCamelCase : List[str] = {
'''7B''': 1,
'''7Bf''': 1,
'''13B''': 2,
'''13Bf''': 2,
'''30B''': 4,
'''65B''': 8,
'''70B''': 8,
'''70Bf''': 8,
}
def snake_case_ ( lowerCAmelCase_ : str , lowerCAmelCase_ : Any=1 , lowerCAmelCase_ : Optional[Any]=256 ):
return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of)
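# Worked example: for LLaMA-7B, dim = 4096 with ffn_dim_multiplier = 1 and
# multiple_of = 256 gives int(8 * 4096 / 3) = 10922, rounded up to the next
# multiple of 256 -> 11008, matching the "7B" entry in the table above.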
def snake_case_ ( lowerCAmelCase_ : Tuple ):
with open(lowerCAmelCase_ , """r""" ) as f:
return json.load(lowerCAmelCase_ )
def snake_case_ ( lowerCAmelCase_ : Any , lowerCAmelCase_ : Union[str, Any] ):
with open(lowerCAmelCase_ , """w""" ) as f:
json.dump(lowerCAmelCase_ , lowerCAmelCase_ )
def snake_case_ ( lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : Dict=True ):
os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_ )
__lowercase : List[Any] = os.path.join(lowerCAmelCase_ , """tmp""" )
os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_ )
__lowercase : str = read_json(os.path.join(lowerCAmelCase_ , """params.json""" ) )
__lowercase : Any = NUM_SHARDS[model_size]
__lowercase : int = params["""n_layers"""]
__lowercase : str = params["""n_heads"""]
__lowercase : List[str] = n_heads // num_shards
__lowercase : List[str] = params["""dim"""]
__lowercase : Union[str, Any] = dim // n_heads
__lowercase : Any = 10_000.0
__lowercase : Tuple = 1.0 / (base ** (torch.arange(0 , lowerCAmelCase_ , 2 ).float() / dims_per_head))
if "n_kv_heads" in params:
__lowercase : Union[str, Any] = params["""n_kv_heads"""] # for GQA / MQA
__lowercase : Union[str, Any] = n_heads_per_shard // num_key_value_heads
__lowercase : Optional[Any] = dim // num_key_value_heads
else: # compatibility with other checkpoints
__lowercase : Optional[Any] = n_heads
__lowercase : Dict = n_heads_per_shard
__lowercase : List[Any] = dim
# permute for sliced rotary
    def permute(w : List[Any] , n_heads : int=n_heads , dima : Optional[int]=dim , dimb : List[Any]=dim ):
        return w.view(n_heads , dima // n_heads // 2 , 2 , dimb ).transpose(1 , 2 ).reshape(dima , dimb )
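    # The permutation above converts the checkpoint's interleaved rotary layout
    # (consecutive row pairs per head) into the half-split layout the HF LLaMA
    # implementation expects: first-of-pair rows form the first half of each
    # head's rows, second-of-pair rows the second half.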
print(F"Fetching all parameters from the checkpoint at {input_base_path}." )
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
__lowercase : Optional[Any] = torch.load(os.path.join(lowerCAmelCase_ , """consolidated.00.pth""" ) , map_location="""cpu""" )
else:
# Sharded
__lowercase : List[Any] = [
torch.load(os.path.join(lowerCAmelCase_ , F"consolidated.{i:02d}.pth" ) , map_location="""cpu""" )
for i in range(lowerCAmelCase_ )
]
__lowercase : List[str] = 0
__lowercase : Optional[int] = {"""weight_map""": {}}
for layer_i in range(lowerCAmelCase_ ):
__lowercase : Dict = F"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"
if model_size == "7B":
# Unsharded
__lowercase : int = {
F"model.layers.{layer_i}.self_attn.q_proj.weight": permute(
loaded[F"layers.{layer_i}.attention.wq.weight"] ),
F"model.layers.{layer_i}.self_attn.k_proj.weight": permute(
loaded[F"layers.{layer_i}.attention.wk.weight"] ),
F"model.layers.{layer_i}.self_attn.v_proj.weight": loaded[F"layers.{layer_i}.attention.wv.weight"],
F"model.layers.{layer_i}.self_attn.o_proj.weight": loaded[F"layers.{layer_i}.attention.wo.weight"],
F"model.layers.{layer_i}.mlp.gate_proj.weight": loaded[F"layers.{layer_i}.feed_forward.w1.weight"],
F"model.layers.{layer_i}.mlp.down_proj.weight": loaded[F"layers.{layer_i}.feed_forward.w2.weight"],
F"model.layers.{layer_i}.mlp.up_proj.weight": loaded[F"layers.{layer_i}.feed_forward.w3.weight"],
F"model.layers.{layer_i}.input_layernorm.weight": loaded[F"layers.{layer_i}.attention_norm.weight"],
F"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[F"layers.{layer_i}.ffn_norm.weight"],
}
else:
# Sharded
            # Note that attention.w{q,k,v,o}, feed_forward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
# the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
# redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
__lowercase : Any = {
F"model.layers.{layer_i}.input_layernorm.weight": loaded[0][
F"layers.{layer_i}.attention_norm.weight"
].clone(),
F"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[0][
F"layers.{layer_i}.ffn_norm.weight"
].clone(),
}
__lowercase : Dict = permute(
torch.cat(
[
loaded[i][F"layers.{layer_i}.attention.wq.weight"].view(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
for i in range(lowerCAmelCase_ )
] , dim=0 , ).reshape(lowerCAmelCase_ , lowerCAmelCase_ ) )
__lowercase : Dict = permute(
torch.cat(
[
loaded[i][F"layers.{layer_i}.attention.wk.weight"].view(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
for i in range(lowerCAmelCase_ )
] , dim=0 , ).reshape(lowerCAmelCase_ , lowerCAmelCase_ ) , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , )
__lowercase : List[Any] = torch.cat(
[
loaded[i][F"layers.{layer_i}.attention.wv.weight"].view(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
for i in range(lowerCAmelCase_ )
] , dim=0 , ).reshape(lowerCAmelCase_ , lowerCAmelCase_ )
__lowercase : Any = torch.cat(
[loaded[i][F"layers.{layer_i}.attention.wo.weight"] for i in range(lowerCAmelCase_ )] , dim=1 )
__lowercase : Union[str, Any] = torch.cat(
[loaded[i][F"layers.{layer_i}.feed_forward.w1.weight"] for i in range(lowerCAmelCase_ )] , dim=0 )
__lowercase : Optional[Any] = torch.cat(
[loaded[i][F"layers.{layer_i}.feed_forward.w2.weight"] for i in range(lowerCAmelCase_ )] , dim=1 )
__lowercase : List[Any] = torch.cat(
[loaded[i][F"layers.{layer_i}.feed_forward.w3.weight"] for i in range(lowerCAmelCase_ )] , dim=0 )
__lowercase : List[str] = inv_freq
for k, v in state_dict.items():
__lowercase : int = filename
param_count += v.numel()
torch.save(lowerCAmelCase_ , os.path.join(lowerCAmelCase_ , lowerCAmelCase_ ) )
__lowercase : Any = F"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"
if model_size == "7B":
# Unsharded
__lowercase : List[Any] = {
"""model.embed_tokens.weight""": loaded["""tok_embeddings.weight"""],
"""model.norm.weight""": loaded["""norm.weight"""],
"""lm_head.weight""": loaded["""output.weight"""],
}
else:
__lowercase : List[Any] = {
"""model.norm.weight""": loaded[0]["""norm.weight"""],
"""model.embed_tokens.weight""": torch.cat(
[loaded[i]["""tok_embeddings.weight"""] for i in range(lowerCAmelCase_ )] , dim=1 ),
"""lm_head.weight""": torch.cat([loaded[i]["""output.weight"""] for i in range(lowerCAmelCase_ )] , dim=0 ),
}
for k, v in state_dict.items():
__lowercase : Union[str, Any] = filename
param_count += v.numel()
torch.save(lowerCAmelCase_ , os.path.join(lowerCAmelCase_ , lowerCAmelCase_ ) )
# Write configs
__lowercase : List[str] = {"""total_size""": param_count * 2}
write_json(lowerCAmelCase_ , os.path.join(lowerCAmelCase_ , """pytorch_model.bin.index.json""" ) )
__lowercase : Union[str, Any] = params["""ffn_dim_multiplier"""] if """ffn_dim_multiplier""" in params else 1
__lowercase : str = params["""multiple_of"""] if """multiple_of""" in params else 256
__lowercase : Union[str, Any] = LlamaConfig(
hidden_size=lowerCAmelCase_ , intermediate_size=compute_intermediate_size(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) , num_attention_heads=params["""n_heads"""] , num_hidden_layers=params["""n_layers"""] , rms_norm_eps=params["""norm_eps"""] , num_key_value_heads=lowerCAmelCase_ , )
config.save_pretrained(lowerCAmelCase_ )
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print("""Loading the checkpoint in a Llama model.""" )
    __lowercase : Union[str, Any] = LlamaForCausalLM.from_pretrained(lowerCAmelCase_ , torch_dtype=torch.float16 , low_cpu_mem_usage=lowerCAmelCase_ )
# Avoid saving this as part of the config.
del model.config._name_or_path
print("""Saving in the Transformers format.""" )
model.save_pretrained(lowerCAmelCase_ , safe_serialization=lowerCAmelCase_ )
shutil.rmtree(lowerCAmelCase_ )
def snake_case_ ( lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : int ):
# Initialize the tokenizer based on the `spm` model
__lowercase : Optional[Any] = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
print(F"Saving a {tokenizer_class.__name__} to {tokenizer_path}." )
__lowercase : Optional[Any] = tokenizer_class(lowerCAmelCase_ )
tokenizer.save_pretrained(lowerCAmelCase_ )
def snake_case_ ( ):
__lowercase : List[str] = argparse.ArgumentParser()
parser.add_argument(
"""--input_dir""" , help="""Location of LLaMA weights, which contains tokenizer.model and model folders""" , )
parser.add_argument(
"""--model_size""" , choices=["""7B""", """7Bf""", """13B""", """13Bf""", """30B""", """65B""", """70B""", """70Bf""", """tokenizer_only"""] , )
parser.add_argument(
"""--output_dir""" , help="""Location to write HF model and tokenizer""" , )
parser.add_argument("""--safe_serialization""" , type=lowerCAmelCase_ , help="""Whether or not to save using `safetensors`.""" )
__lowercase : List[Any] = parser.parse_args()
if args.model_size != "tokenizer_only":
write_model(
model_path=args.output_dir , input_base_path=os.path.join(args.input_dir , args.model_size ) , model_size=args.model_size , safe_serialization=args.safe_serialization , )
__lowercase : List[Any] = os.path.join(args.input_dir , """tokenizer.model""" )
write_tokenizer(args.output_dir , lowerCAmelCase_ )
if __name__ == "__main__":
main()
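# Hedged CLI sketch (the file name is illustrative):
#   python convert_llama_weights_to_hf.py --input_dir /path/to/llama \
#       --model_size 7B --output_dir ./llama-7b-hf
# converts the original consolidated.*.pth shards plus tokenizer.model into the
# Hugging Face format written by write_model / write_tokenizer above.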
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class lowerCAmelCase ( __a ):
'''simple docstring'''
def lowerCAmelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowercase : str = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__a , """tf_padding""" ) )
self.parent.assertTrue(hasattr(__a , """depth_multiplier""" ) )
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : Union[str, Any] , __a : Tuple , __a : str=13 , __a : Dict=3 , __a : List[Any]=32 , __a : Any=0.25 , __a : Any=8 , __a : Optional[int]=8 , __a : Optional[int]=6 , __a : Dict=32 , __a : Tuple=True , __a : List[Any]=True , __a : Optional[int]=True , __a : Tuple="relu6" , __a : Optional[Any]=1280 , __a : str=0.1 , __a : str=0.02 , __a : Optional[Any]=True , __a : Tuple=True , __a : Dict=10 , __a : Optional[Any]=None , ) -> Any:
"""simple docstring"""
__lowercase : List[str] = parent
__lowercase : Tuple = batch_size
__lowercase : Dict = num_channels
__lowercase : Optional[int] = image_size
__lowercase : int = depth_multiplier
__lowercase : str = depth_divisible_by
__lowercase : int = min_depth
__lowercase : Tuple = expand_ratio
__lowercase : Optional[int] = tf_padding
__lowercase : Dict = output_stride
__lowercase : Dict = first_layer_is_expansion
__lowercase : Optional[Any] = finegrained_output
__lowercase : str = hidden_act
__lowercase : Union[str, Any] = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier )
__lowercase : Optional[int] = classifier_dropout_prob
__lowercase : int = use_labels
__lowercase : Optional[int] = is_training
__lowercase : Dict = num_labels
__lowercase : Tuple = initializer_range
__lowercase : Optional[Any] = scope
def lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase : List[Any] = None
__lowercase : Optional[Any] = None
if self.use_labels:
__lowercase : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
__lowercase : Optional[int] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__lowercase : List[Any] = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowerCAmelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def lowerCAmelCase ( self : Tuple , __a : Dict , __a : Tuple , __a : Optional[int] , __a : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : Optional[int] = MobileNetVaModel(config=__a )
model.to(__a )
model.eval()
__lowercase : Tuple = model(__a )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
self.parent.assertEqual(
result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , )
def lowerCAmelCase ( self : List[str] , __a : Optional[int] , __a : List[str] , __a : str , __a : Optional[int] ) -> Tuple:
"""simple docstring"""
__lowercase : List[Any] = self.num_labels
__lowercase : Dict = MobileNetVaForImageClassification(__a )
model.to(__a )
model.eval()
__lowercase : Dict = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase ( self : int , __a : List[str] , __a : Tuple , __a : Any , __a : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowercase : int = self.num_labels
__lowercase : List[Any] = MobileNetVaForSemanticSegmentation(__a )
model.to(__a )
model.eval()
__lowercase : Dict = model(__a )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
__lowercase : str = model(__a , labels=__a )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowerCAmelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
__lowercase : List[str] = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase , __lowercase : List[str] = config_and_inputs
__lowercase : List[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase ( __a , __a , unittest.TestCase ):
'''simple docstring'''
_A : Tuple = (
(MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
if is_torch_available()
else ()
)
_A : Optional[Any] = (
{
'''feature-extraction''': MobileNetVaModel,
'''image-classification''': MobileNetVaForImageClassification,
'''image-segmentation''': MobileNetVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
_A : Tuple = False
_A : List[str] = False
_A : List[str] = False
_A : Optional[int] = False
def lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = MobileNetVaModelTester(self )
__lowercase : int = MobileNetVaConfigTester(self , config_class=__a , has_text_modality=__a )
def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileNetV2 does not use inputs_embeds""" )
def lowerCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip(reason="""MobileNetV2 does not support input and output embeddings""" )
def lowerCAmelCase ( self : Any ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip(reason="""MobileNetV2 does not output attentions""" )
def lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
pass
def lowerCAmelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
__lowercase , __lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase : List[Any] = model_class(__a )
__lowercase : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase : int = [*signature.parameters.keys()]
__lowercase : Any = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __a )
def lowerCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
__lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def lowerCAmelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
def check_hidden_states_output(__a : List[Any] , __a : Tuple , __a : List[str] ):
__lowercase : Optional[Any] = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
__lowercase : List[Any] = model(**self._prepare_for_class(__a , __a ) )
__lowercase : Tuple = outputs.hidden_states
__lowercase : str = 16
self.assertEqual(len(__a ) , __a )
__lowercase , __lowercase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase : Any = True
check_hidden_states_output(__a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase : Union[str, Any] = True
check_hidden_states_output(__a , __a , __a )
def lowerCAmelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__lowercase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
def lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
__lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__a )
@slow
def lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase : Optional[int] = MobileNetVaModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def snake_case_ ( ):
__lowercase : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
return (
MobileNetVaImageProcessor.from_pretrained("""google/mobilenet_v2_1.0_224""" ) if is_vision_available() else None
)
@slow
def lowerCAmelCase ( self : str ) -> int:
"""simple docstring"""
__lowercase : Tuple = MobileNetVaForImageClassification.from_pretrained("""google/mobilenet_v2_1.0_224""" ).to(__a )
__lowercase : str = self.default_image_processor
__lowercase : Tuple = prepare_img()
__lowercase : Tuple = image_processor(images=__a , return_tensors="""pt""" ).to(__a )
# forward pass
with torch.no_grad():
__lowercase : str = model(**__a )
# verify the logits
__lowercase : Union[str, Any] = torch.Size((1, 1001) )
self.assertEqual(outputs.logits.shape , __a )
__lowercase : str = torch.tensor([0.2445, -1.1993, 0.1905] ).to(__a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1E-4 ) )
@slow
def lowerCAmelCase ( self : Tuple ) -> Any:
"""simple docstring"""
__lowercase : int = MobileNetVaForSemanticSegmentation.from_pretrained("""google/deeplabv3_mobilenet_v2_1.0_513""" )
__lowercase : Dict = model.to(__a )
__lowercase : Tuple = MobileNetVaImageProcessor.from_pretrained("""google/deeplabv3_mobilenet_v2_1.0_513""" )
__lowercase : List[str] = prepare_img()
__lowercase : Optional[int] = image_processor(images=__a , return_tensors="""pt""" ).to(__a )
# forward pass
with torch.no_grad():
__lowercase : Union[str, Any] = model(**__a )
__lowercase : Any = outputs.logits
# verify the logits
__lowercase : Dict = torch.Size((1, 21, 65, 65) )
self.assertEqual(logits.shape , __a )
__lowercase : str = torch.tensor(
[
[[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
[[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
[[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
] , device=__a , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __a , atol=1E-4 ) )
from __future__ import annotations
def snake_case_ ( lowerCAmelCase_ : str ):
    return [ord(elem ) - 96 for elem in plain]
def snake_case_ ( lowerCAmelCase_ : list[int] ):
return "".join(chr(elem + 96 ) for elem in encoded )
def snake_case_ ( ):
__lowercase : str = encode(input("""-> """ ).strip().lower() )
print("""Encoded: """ , lowerCAmelCase_ )
print("""Decoded:""" , decode(lowerCAmelCase_ ) )
if __name__ == "__main__":
main()
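# Worked example: encode("hello") == [8, 5, 12, 12, 15] and
# decode([8, 5, 12, 12, 15]) == "hello"; lowercase letters map to 1..26 via ord(c) - 96.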
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def snake_case_ ( lowerCAmelCase_ : bool = True , *lowerCAmelCase_ : int , **lowerCAmelCase_ : List[str] ):
if not is_tqdm_available():
raise ImportError("""Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.""" )
__lowercase : List[str] = False
if main_process_only:
        __lowercase : Optional[int] = PartialState().local_process_index != 0
    return _tqdm(*lowerCAmelCase_ , **lowerCAmelCase_ , disable=disable )
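# Hedged usage sketch: with main_process_only=True only the local main process
# renders a bar; other ranks call tqdm with disable=True. Assumes the wrapper is
# importable as `accelerate.utils.tqdm` and that PartialState can initialize.
from accelerate.utils import tqdm

for _ in tqdm(True, range(100)):
    pass  # per-step work would go here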
import fire
from utils import calculate_rouge, save_json
def snake_case_ ( lowerCAmelCase_ : Any , lowerCAmelCase_ : str , lowerCAmelCase_ : str=None , **lowerCAmelCase_ : str ):
__lowercase : Tuple = [x.strip() for x in open(lowerCAmelCase_ ).readlines()]
__lowercase : Dict = [x.strip() for x in open(lowerCAmelCase_ ).readlines()][: len(lowerCAmelCase_ )]
__lowercase : Tuple = calculate_rouge(lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ )
if save_path is not None:
save_json(lowerCAmelCase_ , lowerCAmelCase_ , indent=lowerCAmelCase_ )
return metrics # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
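# Hedged CLI sketch (via python-fire; the file name is illustrative):
#   python rouge_cli.py preds.txt targets.txt --save_path metrics.json
# computes ROUGE between line-aligned prediction and reference files and
# optionally saves the scores as JSON.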
from __future__ import annotations
def snake_case_ ( lowerCAmelCase_ : list[int] ):
if not nums:
return 0
__lowercase : Tuple = nums[0]
__lowercase : Tuple = 0
for num in nums[1:]:
        __lowercase , __lowercase : List[str] = (
            max_excluding + num,
            max(max_including , max_excluding ),
        )
    return max(max_including , max_excluding )
if __name__ == "__main__":
import doctest
doctest.testmod()
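# Worked example: for nums = [2, 7, 9, 3, 1] the maximum sum over non-adjacent
# elements is 2 + 9 + 1 = 12 (the running pair tracks best-including /
# best-excluding the current element).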
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
lowerCamelCase : Any = {
'''Salesforce/blip-vqa-base''': '''https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json''',
    '''Salesforce/blip-vqa-capfilt-large''': (
        '''https://huggingface.co/Salesforce/blip-vqa-base-capfilt/resolve/main/config.json'''
    ),
'''Salesforce/blip-image-captioning-base''': (
'''https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json'''
),
'''Salesforce/blip-image-captioning-large''': (
'''https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json'''
),
'''Salesforce/blip-itm-base-coco''': '''https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json''',
'''Salesforce/blip-itm-large-coco''': '''https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json''',
'''Salesforce/blip-itm-base-flikr''': '''https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json''',
'''Salesforce/blip-itm-large-flikr''': (
'''https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json'''
),
}
class lowerCAmelCase ( __a ):
'''simple docstring'''
_A : Union[str, Any] = '''blip_text_model'''
def __init__( self : List[str] , __a : Tuple=30524 , __a : str=768 , __a : int=768 , __a : Optional[Any]=3072 , __a : int=768 , __a : Optional[Any]=12 , __a : Optional[Any]=8 , __a : str=512 , __a : Dict="gelu" , __a : List[str]=1E-12 , __a : List[str]=0.0 , __a : Union[str, Any]=0.0 , __a : List[str]=0.02 , __a : Optional[int]=30522 , __a : Optional[Any]=2 , __a : Tuple=0 , __a : str=102 , __a : Union[str, Any]=True , __a : List[str]=True , **__a : List[str] , ) -> Any:
"""simple docstring"""
super().__init__(
pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , sep_token_id=__a , **__a , )
__lowercase : int = vocab_size
__lowercase : int = hidden_size
__lowercase : Dict = encoder_hidden_size
__lowercase : Optional[Any] = intermediate_size
__lowercase : List[Any] = projection_dim
__lowercase : List[str] = hidden_dropout_prob
__lowercase : Dict = num_hidden_layers
__lowercase : Any = num_attention_heads
__lowercase : Optional[Any] = max_position_embeddings
__lowercase : Optional[Any] = layer_norm_eps
__lowercase : Tuple = hidden_act
__lowercase : Dict = initializer_range
__lowercase : Optional[int] = attention_probs_dropout_prob
__lowercase : Tuple = is_decoder
__lowercase : Dict = use_cache
@classmethod
def lowerCAmelCase ( cls : Any , __a : Union[str, os.PathLike] , **__a : List[str] ) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(__a )
__lowercase , __lowercase : int = cls.get_config_dict(__a , **__a )
# get the text config dict if we are loading from BlipConfig
if config_dict.get("""model_type""" ) == "blip":
__lowercase : Any = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(__a , **__a )
class lowerCAmelCase ( __a ):
'''simple docstring'''
_A : Union[str, Any] = '''blip_vision_model'''
def __init__( self : List[Any] , __a : Union[str, Any]=768 , __a : Tuple=3072 , __a : int=512 , __a : Any=12 , __a : Dict=12 , __a : Any=384 , __a : List[Any]=16 , __a : Any="gelu" , __a : Union[str, Any]=1E-5 , __a : List[str]=0.0 , __a : Union[str, Any]=1E-10 , **__a : int , ) -> int:
"""simple docstring"""
super().__init__(**__a )
__lowercase : Tuple = hidden_size
__lowercase : Any = intermediate_size
__lowercase : Tuple = projection_dim
__lowercase : Optional[int] = num_hidden_layers
__lowercase : Tuple = num_attention_heads
__lowercase : Optional[Any] = patch_size
__lowercase : int = image_size
__lowercase : int = initializer_range
__lowercase : int = attention_dropout
__lowercase : Dict = layer_norm_eps
__lowercase : List[str] = hidden_act
@classmethod
def lowerCAmelCase ( cls : Union[str, Any] , __a : Union[str, os.PathLike] , **__a : Optional[int] ) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(__a )
__lowercase , __lowercase : Union[str, Any] = cls.get_config_dict(__a , **__a )
# get the vision config dict if we are loading from BlipConfig
if config_dict.get("""model_type""" ) == "blip":
__lowercase : Tuple = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(__a , **__a )
class lowerCAmelCase ( __a ):
'''simple docstring'''
_A : Dict = '''blip'''
_A : Any = True
def __init__( self : Any , __a : Union[str, Any]=None , __a : Optional[int]=None , __a : Optional[int]=512 , __a : Tuple=2.6592 , __a : Tuple=256 , **__a : str , ) -> Tuple:
"""simple docstring"""
super().__init__(**__a )
if text_config is None:
__lowercase : Optional[Any] = {}
logger.info("""`text_config` is `None`. Initializing the `BlipTextConfig` with default values.""" )
if vision_config is None:
__lowercase : str = {}
logger.info("""`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.""" )
__lowercase : Optional[Any] = BlipTextConfig(**__a )
__lowercase : Union[str, Any] = BlipVisionConfig(**__a )
__lowercase : Optional[int] = self.vision_config.hidden_size
__lowercase : List[str] = projection_dim
__lowercase : int = logit_scale_init_value
__lowercase : Optional[int] = 1.0
__lowercase : Dict = 0.02
__lowercase : Optional[Any] = image_text_hidden_size
@classmethod
def lowerCAmelCase ( cls : Any , __a : BlipTextConfig , __a : BlipVisionConfig , **__a : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__a )
def lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Tuple = copy.deepcopy(self.__dict__ )
__lowercase : Tuple = self.text_config.to_dict()
__lowercase : str = self.vision_config.to_dict()
__lowercase : Any = self.__class__.model_type
return output
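# Hedged usage sketch, using the upstream `transformers` names for the
# (renamed) config classes above.
from transformers import BlipConfig, BlipTextConfig, BlipVisionConfig

text_config = BlipTextConfig(hidden_size=768)
vision_config = BlipVisionConfig(hidden_size=768)
config = BlipConfig.from_text_vision_configs(text_config, vision_config)
assert config.to_dict()["model_type"] == "blip"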
lowerCamelCase : List[str] = '''0.18.2'''
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
        T5FilmDecoder,
        Transformer2DModel,
        UNet1DModel,
        UNet2DConditionModel,
        UNet2DModel,
        UNet3DConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
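# Hedged, standalone sketch of the guarded-import pattern used throughout this
# __init__: probe each optional dependency once and degrade missing features to
# stubs that raise an informative error only when used. Names are illustrative.
import importlib.util


def _is_available(package: str) -> bool:
    return importlib.util.find_spec(package) is not None


if _is_available("torch"):
    def make_tensor():
        import torch
        return torch.zeros(1)
else:
    def make_tensor():
        raise ImportError("This feature requires `torch`: pip install torch")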
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase : List[Any] = logging.get_logger(__name__)
def snake_case_ ( lowerCAmelCase_ : int , lowerCAmelCase_ : str=False , lowerCAmelCase_ : Any=False ):
__lowercase : Any = """backbone.""" if is_semantic else """"""
__lowercase : Optional[Any] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"{prefix}blocks.{i}.norm1.weight", F"beit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"{prefix}blocks.{i}.norm1.bias", F"beit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"{prefix}blocks.{i}.attn.proj.weight", F"beit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(F"{prefix}blocks.{i}.attn.proj.bias", F"beit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"{prefix}blocks.{i}.norm2.weight", F"beit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"{prefix}blocks.{i}.norm2.bias", F"beit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc1.weight", F"beit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc1.bias", F"beit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc2.weight", F"beit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc2.bias", F"beit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
(F"{prefix}cls_token", """beit.embeddings.cls_token"""),
(F"{prefix}patch_embed.proj.weight", """beit.embeddings.patch_embeddings.projection.weight"""),
(F"{prefix}patch_embed.proj.bias", """beit.embeddings.patch_embeddings.projection.bias"""),
(F"{prefix}pos_embed", """beit.embeddings.position_embeddings"""),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("""mask_token""", """beit.embeddings.mask_token"""),
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("""fc_norm.weight""", """beit.pooler.layernorm.weight"""),
("""fc_norm.bias""", """beit.pooler.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def snake_case_ ( lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Any=False , lowerCAmelCase_ : List[Any]=False ):
for i in range(config.num_hidden_layers ):
__lowercase : Tuple = """backbone.""" if is_semantic else """"""
# queries, keys and values
__lowercase : int = state_dict.pop(F"{prefix}blocks.{i}.attn.qkv.weight" )
__lowercase : Dict = state_dict.pop(F"{prefix}blocks.{i}.attn.q_bias" )
__lowercase : int = state_dict.pop(F"{prefix}blocks.{i}.attn.v_bias" )
__lowercase : List[str] = in_proj_weight[
: config.hidden_size, :
]
__lowercase : Union[str, Any] = q_bias
__lowercase : Any = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__lowercase : Union[str, Any] = in_proj_weight[
-config.hidden_size :, :
]
__lowercase : str = v_bias
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
__lowercase : int = state_dict.pop(F"{prefix}blocks.{i}.gamma_1" )
__lowercase : str = state_dict.pop(F"{prefix}blocks.{i}.gamma_2" )
        __lowercase : List[str] = gamma_1
        __lowercase : Optional[int] = gamma_2
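        # Note on the splits above: the checkpoint stores the fused qkv projection as a
        # single (3 * hidden_size, hidden_size) matrix; the three hidden_size-row slices
        # are the query, key and value weights, and BEiT-style checkpoints carry biases
        # only for q and v (k has no bias), which is why only q_bias and v_bias are popped.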
def snake_case_ ( lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : int ):
__lowercase : Tuple = dct.pop(lowerCAmelCase_ )
__lowercase : Tuple = val
def snake_case_ ( ):
__lowercase : Optional[int] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__lowercase : Any = Image.open(requests.get(lowerCAmelCase_ , stream=lowerCAmelCase_ ).raw )
return im
@torch.no_grad()
def snake_case_ ( lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[int]=False ):
__lowercase : Dict = False if """rvlcdip""" in checkpoint_url else True
__lowercase : Tuple = BeitConfig(use_absolute_position_embeddings=lowerCAmelCase_ , use_mask_token=lowerCAmelCase_ )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
__lowercase : Union[str, Any] = 1024
__lowercase : Optional[int] = 4096
__lowercase : List[Any] = 24
__lowercase : Dict = 16
# labels
if "rvlcdip" in checkpoint_url:
__lowercase : Optional[int] = 16
__lowercase : Any = """huggingface/label-files"""
__lowercase : Union[str, Any] = """rvlcdip-id2label.json"""
__lowercase : List[str] = json.load(open(hf_hub_download(lowerCAmelCase_ , lowerCAmelCase_ , repo_type="""dataset""" ) , """r""" ) )
        __lowercase : Optional[int] = {int(k ): v for k, v in idalabel.items()}
__lowercase : Union[str, Any] = idalabel
__lowercase : Optional[Any] = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
__lowercase : Optional[int] = torch.hub.load_state_dict_from_url(lowerCAmelCase_ , map_location="""cpu""" )["""model"""]
__lowercase : Union[str, Any] = create_rename_keys(lowerCAmelCase_ , has_lm_head=lowerCAmelCase_ )
for src, dest in rename_keys:
rename_key(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
read_in_q_k_v(lowerCAmelCase_ , lowerCAmelCase_ , has_lm_head=lowerCAmelCase_ )
# load HuggingFace model
__lowercase : Dict = BeitForMaskedImageModeling(lowerCAmelCase_ ) if has_lm_head else BeitForImageClassification(lowerCAmelCase_ )
model.eval()
model.load_state_dict(lowerCAmelCase_ )
# Check outputs on an image
__lowercase : List[str] = BeitImageProcessor(
size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=lowerCAmelCase_ )
__lowercase : List[str] = prepare_img()
__lowercase : Optional[Any] = image_processor(images=lowerCAmelCase_ , return_tensors="""pt""" )
__lowercase : Optional[int] = encoding["""pixel_values"""]
__lowercase : str = model(lowerCAmelCase_ )
__lowercase : Tuple = outputs.logits
# verify logits
__lowercase : str = [1, 16] if """rvlcdip""" in checkpoint_url else [1, 196, 8192]
assert logits.shape == torch.Size(lowerCAmelCase_ ), "Shape of logits not as expected"
Path(lowerCAmelCase_ ).mkdir(exist_ok=lowerCAmelCase_ )
print(F"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(lowerCAmelCase_ )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(lowerCAmelCase_ )
if push_to_hub:
if has_lm_head:
__lowercase : Optional[Any] = """dit-base""" if """base""" in checkpoint_url else """dit-large"""
else:
__lowercase : Tuple = """dit-base-finetuned-rvlcdip""" if """dit-b""" in checkpoint_url else """dit-large-finetuned-rvlcdip"""
image_processor.push_to_hub(
repo_path_or_name=Path(lowerCAmelCase_ , lowerCAmelCase_ ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=lowerCAmelCase_ , )
model.push_to_hub(
repo_path_or_name=Path(lowerCAmelCase_ , lowerCAmelCase_ ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=lowerCAmelCase_ , )
if __name__ == "__main__":
lowerCamelCase : List[str] = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
lowerCamelCase : List[str] = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
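# Hedged CLI sketch (the file name is illustrative):
#   python convert_dit_to_pytorch.py --checkpoint_url <pth-url> --pytorch_dump_folder_path ./dit
# downloads the original checkpoint, remaps its keys with the rename tables
# above, verifies the logits shape, and saves the HF model and image processor.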
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
lowerCamelCase : Optional[Any] = random.Random()
def snake_case_ ( lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Tuple=1.0 , lowerCAmelCase_ : Union[str, Any]=None , lowerCAmelCase_ : List[Any]=None ):
if rng is None:
__lowercase : Tuple = global_rng
__lowercase : Tuple = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
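# The helper above returns shape[0] lists of shape[1] random floats in
# [0, scale), drawn from the module-level RNG unless an explicit rng is given.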
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Any , __a : Union[str, Any] , __a : Union[str, Any]=7 , __a : Optional[Any]=400 , __a : Union[str, Any]=2000 , __a : Optional[Any]=1 , __a : Optional[int]=0.0 , __a : List[Any]=16000 , __a : int=True , __a : Optional[Any]=True , ) -> int:
"""simple docstring"""
__lowercase : int = parent
__lowercase : List[Any] = batch_size
__lowercase : Dict = min_seq_length
__lowercase : List[Any] = max_seq_length
__lowercase : Union[str, Any] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__lowercase : List[str] = feature_size
__lowercase : List[Any] = padding_value
__lowercase : int = sampling_rate
__lowercase : Tuple = return_attention_mask
__lowercase : Optional[Any] = do_normalize
def lowerCAmelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def lowerCAmelCase ( self : List[str] , __a : Union[str, Any]=False , __a : Optional[int]=False ) -> Tuple:
"""simple docstring"""
def _flatten(__a : Union[str, Any] ):
return list(itertools.chain(*__a ) )
if equal_length:
__lowercase : Tuple = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
__lowercase : Union[str, Any] = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__lowercase : str = [np.asarray(__a ) for x in speech_inputs]
return speech_inputs
class lowerCAmelCase ( __a , unittest.TestCase ):
'''simple docstring'''
_A : Optional[Any] = WavaVecaFeatureExtractor
def lowerCAmelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
__lowercase : Any = WavaVecaFeatureExtractionTester(self )
def lowerCAmelCase ( self : Union[str, Any] , __a : List[Any] ) -> List[Any]:
"""simple docstring"""
self.assertTrue(np.all(np.abs(np.mean(__a , axis=0 ) ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(__a , axis=0 ) - 1 ) < 1E-3 ) )
def lowerCAmelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
__lowercase : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
__lowercase : Optional[int] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
__lowercase : str = [np.asarray(__a ) for speech_input in speech_inputs]
# Test not batched input
__lowercase : Union[str, Any] = feat_extract(speech_inputs[0] , return_tensors="""np""" ).input_values
__lowercase : List[Any] = feat_extract(np_speech_inputs[0] , return_tensors="""np""" ).input_values
self.assertTrue(np.allclose(__a , __a , atol=1E-3 ) )
# Test batched
__lowercase : Tuple = feat_extract(__a , return_tensors="""np""" ).input_values
__lowercase : Dict = feat_extract(__a , return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(__a , __a ):
self.assertTrue(np.allclose(__a , __a , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
__lowercase : str = [floats_list((1, x) )[0] for x in (800, 800, 800)]
__lowercase : Optional[Any] = np.asarray(__a )
__lowercase : List[Any] = feat_extract(__a , return_tensors="""np""" ).input_values
__lowercase : int = feat_extract(__a , return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(__a , __a ):
self.assertTrue(np.allclose(__a , __a , atol=1E-3 ) )
def lowerCAmelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__lowercase : Optional[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
__lowercase : int = ["""longest""", """max_length""", """do_not_pad"""]
__lowercase : Dict = [None, 1600, None]
for max_length, padding in zip(__a , __a ):
__lowercase : List[Any] = feat_extract(__a , padding=__a , max_length=__a , return_tensors="""np""" )
__lowercase : Optional[int] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self.assertTrue(input_values[1][1000:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def lowerCAmelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
__lowercase : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__lowercase : Optional[int] = range(800 , 1400 , 200 )
__lowercase : Optional[Any] = [floats_list((1, x) )[0] for x in lengths]
__lowercase : str = ["""longest""", """max_length""", """do_not_pad"""]
__lowercase : Union[str, Any] = [None, 1600, None]
for max_length, padding in zip(__a , __a ):
__lowercase : Optional[int] = feat_extract(__a , max_length=__a , padding=__a )
__lowercase : Any = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def lowerCAmelCase ( self : int ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__lowercase : Any = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
__lowercase : List[str] = feat_extract(
__a , truncation=__a , max_length=1000 , padding="""max_length""" , return_tensors="""np""" )
__lowercase : int = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def lowerCAmelCase ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
__lowercase : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__lowercase : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
__lowercase : List[str] = feat_extract(
__a , truncation=__a , max_length=1000 , padding="""longest""" , return_tensors="""np""" )
__lowercase : int = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1000) )
__lowercase : Dict = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
__lowercase : Optional[int] = feat_extract(
__a , truncation=__a , max_length=2000 , padding="""longest""" , return_tensors="""np""" )
__lowercase : Optional[int] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1200) )
@require_torch
def lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
import torch
__lowercase : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__lowercase : Optional[int] = np.random.rand(100 ).astype(np.floataa )
__lowercase : List[str] = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
__lowercase : Any = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""np""" )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
__lowercase : Optional[Any] = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""pt""" )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
@slow
@require_torch
def lowerCAmelCase ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
__lowercase : Optional[int] = WavaVecaConfig.from_pretrained(__a )
__lowercase : List[str] = WavaVecaFeatureExtractor.from_pretrained(__a )
# only "layer" feature extraction norm should make use of
# attention_mask
self.assertEqual(feat_extract.return_attention_mask , config.feat_extract_norm == """layer""" )
from torch import nn
class lowerCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Tuple , __a : int , __a : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
__lowercase : int = class_size
__lowercase : int = embed_size
# self.mlp1 = nn.Linear(embed_size, embed_size)
# self.mlp2 = (nn.Linear(embed_size, class_size))
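# A single linear projection from the embedding space to class logits; the
# commented-out lines above appear to be an earlier two-layer MLP variant.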
__lowercase : str = nn.Linear(__a , __a )
def lowerCAmelCase ( self : Tuple , __a : int ) -> Tuple:
"""simple docstring"""
__lowercase : str = self.mlp(__a )
return logits
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class lowerCAmelCase ( __a , unittest.TestCase ):
'''simple docstring'''
_A : Tuple = ShapEImgaImgPipeline
_A : Optional[int] = ['''image''']
_A : List[Any] = ['''image''']
_A : str = [
'''num_images_per_prompt''',
'''num_inference_steps''',
'''generator''',
'''latents''',
'''guidance_scale''',
'''frame_size''',
'''output_type''',
'''return_dict''',
]
_A : int = False
@property
def lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
return 32
@property
def lowerCAmelCase ( self : int ) -> Any:
"""simple docstring"""
return 32
@property
def lowerCAmelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
return self.time_input_dim * 4
@property
def lowerCAmelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
return 8
@property
def lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
torch.manual_seed(0 )
__lowercase : Any = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
__lowercase : Union[str, Any] = CLIPVisionModel(__a )
return model
@property
def lowerCAmelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
__lowercase : str = CLIPImageProcessor(
crop_size=224 , do_center_crop=__a , do_normalize=__a , do_resize=__a , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=224 , )
return image_processor
@property
def lowerCAmelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
torch.manual_seed(0 )
__lowercase : List[str] = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 16,
"""embedding_dim""": self.time_input_dim,
"""num_embeddings""": 32,
"""embedding_proj_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""num_layers""": 1,
"""clip_embed_dim""": self.time_input_dim * 2,
"""additional_embeddings""": 0,
"""time_embed_act_fn""": """gelu""",
"""norm_in_type""": """layer""",
"""embedding_proj_norm_type""": """layer""",
"""encoder_hid_proj_type""": None,
"""added_emb_type""": None,
}
__lowercase : str = PriorTransformer(**__a )
return model
@property
def lowerCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
torch.manual_seed(0 )
__lowercase : Optional[Any] = {
"""param_shapes""": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"""d_latent""": self.time_input_dim,
"""d_hidden""": self.renderer_dim,
"""n_output""": 12,
"""background""": (
0.1,
0.1,
0.1,
),
}
__lowercase : Any = ShapERenderer(**__a )
return model
def lowerCAmelCase ( self : Optional[int] ) -> Any:
"""simple docstring"""
__lowercase : Union[str, Any] = self.dummy_prior
__lowercase : Optional[int] = self.dummy_image_encoder
__lowercase : Any = self.dummy_image_processor
__lowercase : Optional[int] = self.dummy_renderer
__lowercase : str = HeunDiscreteScheduler(
beta_schedule="""exp""" , num_train_timesteps=1024 , prediction_type="""sample""" , use_karras_sigmas=__a , clip_sample=__a , clip_sample_range=1.0 , )
__lowercase : List[str] = {
"""prior""": prior,
"""image_encoder""": image_encoder,
"""image_processor""": image_processor,
"""renderer""": renderer,
"""scheduler""": scheduler,
}
return components
def lowerCAmelCase ( self : Dict , __a : int , __a : str=0 ) -> int:
"""simple docstring"""
__lowercase : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(__a ) ).to(__a )
if str(__a ).startswith("""mps""" ):
__lowercase : Dict = torch.manual_seed(__a )
else:
__lowercase : Optional[Any] = torch.Generator(device=__a ).manual_seed(__a )
__lowercase : List[Any] = {
"""image""": input_image,
"""generator""": generator,
"""num_inference_steps""": 1,
"""frame_size""": 32,
"""output_type""": """np""",
}
return inputs
def lowerCAmelCase ( self : Tuple ) -> List[str]:
"""simple docstring"""
__lowercase : str = """cpu"""
__lowercase : Any = self.get_dummy_components()
__lowercase : int = self.pipeline_class(**__a )
__lowercase : Union[str, Any] = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
__lowercase : str = pipe(**self.get_dummy_inputs(__a ) )
__lowercase : List[str] = output.images[0]
__lowercase : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
__lowercase : str = np.array(
[
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCAmelCase ( self : int ) -> str:
"""simple docstring"""
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowerCAmelCase ( self : Any ) -> Dict:
"""simple docstring"""
__lowercase : List[Any] = torch_device == """cpu"""
__lowercase : Tuple = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=__a , relax_max_difference=__a , )
def lowerCAmelCase ( self : str ) -> Any:
"""simple docstring"""
__lowercase : int = self.get_dummy_components()
__lowercase : Any = self.pipeline_class(**__a )
__lowercase : List[str] = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
__lowercase : Tuple = 1
__lowercase : Dict = 2
__lowercase : Dict = self.get_dummy_inputs(__a )
for key in inputs.keys():
if key in self.batch_params:
__lowercase : Optional[Any] = batch_size * [inputs[key]]
__lowercase : Union[str, Any] = pipe(**__a , num_images_per_prompt=__a )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/shap_e/corgi.png""" )
__lowercase : Optional[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/shap_e/test_shap_e_img2img_out.npy""" )
__lowercase : str = ShapEImgaImgPipeline.from_pretrained("""openai/shap-e-img2img""" )
__lowercase : str = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
__lowercase : Any = torch.Generator(device=__a ).manual_seed(0 )
__lowercase : Dict = pipe(
__a , generator=__a , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type="""np""" , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(__a , __a )
import fire
from utils import calculate_rouge, save_json
def snake_case_ ( lowerCAmelCase_ : Any , lowerCAmelCase_ : str , lowerCAmelCase_ : str=None , **lowerCAmelCase_ : str ):
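# Reads hypotheses and references line by line, truncates the references to the
# number of hypotheses, computes ROUGE, and optionally saves the metrics as JSON.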
__lowercase : Tuple = [x.strip() for x in open(lowerCAmelCase_ ).readlines()]
__lowercase : Dict = [x.strip() for x in open(lowerCAmelCase_ ).readlines()][: len(lowerCAmelCase_ )]
__lowercase : Tuple = calculate_rouge(lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ )
if save_path is not None:
save_json(lowerCAmelCase_ , lowerCAmelCase_ , indent=lowerCAmelCase_ )
return metrics # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
def snake_case_ ( lowerCAmelCase_ : int ):
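# n is pentagonal iff (1 + sqrt(1 + 24n)) / 6 is a positive integer, obtained by
# inverting P(k) = k * (3k - 1) / 2 and solving the resulting quadratic for k.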
__lowercase : List[Any] = (1 + 24 * n) ** 0.5
return ((1 + root) / 6) % 1 == 0
def snake_case_ ( lowerCAmelCase_ : int = 5000 ):
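# Project Euler 44: search for pentagonal numbers P_i, P_j whose sum and difference
# are both pentagonal, returning the difference D = P_j - P_i of the first pair found.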
__lowercase : List[str] = [(i * (3 * i - 1)) // 2 for i in range(1 , lowerCAmelCase_ )]
for i, pentagonal_i in enumerate(lowerCAmelCase_ ):
for j in range(lowerCAmelCase_ , len(lowerCAmelCase_ ) ):
__lowercase : List[str] = pentagonal_nums[j]
__lowercase : Optional[int] = pentagonal_i + pentagonal_j
__lowercase : Any = pentagonal_j - pentagonal_i
if is_pentagonal(lowerCAmelCase_ ) and is_pentagonal(lowerCAmelCase_ ):
return b
return -1
if __name__ == "__main__":
print(f'''{solution() = }''')
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def snake_case_ ( lowerCAmelCase_ : Dict ):
return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class lowerCAmelCase ( __a ):
'''simple docstring'''
@staticmethod
def lowerCAmelCase ( __a : ArgumentParser ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = parser.add_parser("""download""" )
download_parser.add_argument(
"""--cache-dir""" , type=__a , default=__a , help="""Path to location to store the models""" )
download_parser.add_argument(
"""--force""" , action="""store_true""" , help="""Force the model to be download even if already in cache-dir""" )
download_parser.add_argument(
"""--trust-remote-code""" , action="""store_true""" , help="""Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine""" , )
download_parser.add_argument("""model""" , type=__a , help="""Name of the model to download""" )
download_parser.set_defaults(func=__a )
def __init__( self : Dict , __a : str , __a : str , __a : bool , __a : bool ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Dict = model
__lowercase : List[Any] = cache
__lowercase : Any = force
__lowercase : Optional[int] = trust_remote_code
def lowerCAmelCase ( self : str ) -> List[str]:
"""simple docstring"""
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase : Any = {'''configuration_reformer''': ['''REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ReformerConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Any = ['''ReformerTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : List[str] = ['''ReformerTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : List[str] = [
'''REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ReformerAttention''',
'''ReformerForMaskedLM''',
'''ReformerForQuestionAnswering''',
'''ReformerForSequenceClassification''',
'''ReformerLayer''',
'''ReformerModel''',
'''ReformerModelWithLMHead''',
'''ReformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
lowerCamelCase : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
lowerCamelCase : Union[str, Any] = 1E-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : Dict , __a : List[str] , __a : Optional[int]=16 , __a : Optional[Any]=13 , __a : str=7 , __a : List[str]=14 , __a : Any=10 , __a : str=19 , __a : int=5 , __a : Any=4 , __a : List[Any]=True , __a : Tuple=16 , __a : Dict=2 , __a : Tuple=4 , __a : int=4 , __a : List[Any]="gelu" , __a : Tuple=0.1 , __a : List[str]=0.1 , __a : int=[1, 2, 3, 4, 5] , __a : str=25 , __a : Any=5 , ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Optional[Any] = d_model
__lowercase : Dict = parent
__lowercase : Tuple = batch_size
__lowercase : Optional[int] = prediction_length
__lowercase : List[str] = context_length
__lowercase : Any = cardinality
__lowercase : str = num_time_features
__lowercase : Optional[int] = lags_sequence
__lowercase : Optional[Any] = embedding_dimension
__lowercase : List[Any] = is_training
__lowercase : List[str] = hidden_size
__lowercase : int = num_hidden_layers
__lowercase : Any = num_attention_heads
__lowercase : List[Any] = intermediate_size
__lowercase : int = hidden_act
__lowercase : str = hidden_dropout_prob
__lowercase : List[Any] = attention_probs_dropout_prob
__lowercase : str = context_length
__lowercase : int = prediction_length + label_length
__lowercase : Union[str, Any] = label_length
__lowercase : Optional[int] = moving_average
__lowercase : Optional[Any] = autocorrelation_factor
def lowerCAmelCase ( self : str ) -> List[str]:
"""simple docstring"""
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def lowerCAmelCase ( self : Tuple , __a : str ) -> int:
"""simple docstring"""
__lowercase : Any = config.context_length + max(config.lags_sequence )
__lowercase : Any = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
__lowercase : Optional[int] = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
__lowercase : List[str] = floats_tensor([self.batch_size, _past_length] )
__lowercase : List[str] = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
__lowercase : Dict = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
__lowercase : str = floats_tensor([self.batch_size, config.prediction_length] )
__lowercase : List[str] = {
"""past_values""": past_values,
"""static_categorical_features""": static_categorical_features,
"""past_time_features""": past_time_features,
"""past_observed_mask""": past_observed_mask,
"""future_time_features""": future_time_features,
"""future_values""": future_values,
}
return inputs_dict
def lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Optional[Any] = self.get_config()
__lowercase : Any = self.prepare_autoformer_inputs_dict(__a )
return config, inputs_dict
def lowerCAmelCase ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase , __lowercase : List[str] = self.prepare_config_and_inputs()
return config, inputs_dict
def lowerCAmelCase ( self : Optional[Any] , __a : Tuple , __a : Optional[int] ) -> Any:
"""simple docstring"""
__lowercase : List[str] = AutoformerModel(config=__a ).to(__a ).eval()
__lowercase : Optional[int] = model(**__a )
__lowercase : Dict = outputs.encoder_last_hidden_state
__lowercase : Tuple = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
__lowercase : List[str] = model.get_encoder()
encoder.save_pretrained(__a )
__lowercase : List[str] = AutoformerEncoder.from_pretrained(__a ).to(__a )
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase : Any = model.create_network_inputs(**__a )
__lowercase , __lowercase : Any = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
__lowercase : Optional[Any] = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
__lowercase : Union[str, Any] = encoder(inputs_embeds=__a )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
__lowercase : str = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
__lowercase : Optional[int] = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
__lowercase : Any = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
__lowercase : Dict = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
__lowercase : Optional[Any] = model.get_decoder()
decoder.save_pretrained(__a )
__lowercase : Tuple = AutoformerDecoder.from_pretrained(__a ).to(__a )
__lowercase : str = decoder(
trend=__a , inputs_embeds=__a , encoder_hidden_states=__a , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class lowerCAmelCase ( __a , __a , unittest.TestCase ):
'''simple docstring'''
_A : List[str] = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
_A : List[Any] = (AutoformerForPrediction,) if is_torch_available() else ()
_A : Any = {'''feature-extraction''': AutoformerModel} if is_torch_available() else {}
_A : Dict = False
_A : Tuple = False
_A : Optional[int] = False
_A : Tuple = False
_A : str = False
_A : Union[str, Any] = False
def lowerCAmelCase ( self : Dict ) -> str:
"""simple docstring"""
__lowercase : List[str] = AutoformerModelTester(self )
__lowercase : Dict = ConfigTester(self , config_class=__a , has_text_modality=__a )
def lowerCAmelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase , __lowercase : Dict = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
__lowercase : Dict = model_class(__a )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__a )
__lowercase , __lowercase : Tuple = model_class.from_pretrained(__a , output_loading_info=__a )
self.assertEqual(info["""missing_keys"""] , [] )
def lowerCAmelCase ( self : List[str] ) -> List[str]:
"""simple docstring"""
__lowercase : str = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*__a )
@unittest.skip(reason="""Model has no tokens embeddings""" )
def lowerCAmelCase ( self : str ) -> int:
"""simple docstring"""
pass
def lowerCAmelCase ( self : Any ) -> List[str]:
"""simple docstring"""
__lowercase : Any = inspect.signature(getattr(__a , """forward""" ) )
# The main input is the name of the argument after `self`
__lowercase : Optional[int] = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , __a )
def lowerCAmelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase , __lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase : Dict = model_class(__a )
__lowercase : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase : Any = [*signature.parameters.keys()]
__lowercase : int = [
"""past_values""",
"""past_time_features""",
"""past_observed_mask""",
"""static_categorical_features""",
"""static_real_features""",
"""future_values""",
"""future_time_features""",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("""future_observed_mask""" )
expected_arg_names.extend(
[
"""decoder_attention_mask""",
"""head_mask""",
"""decoder_head_mask""",
"""cross_attn_head_mask""",
"""encoder_outputs""",
"""past_key_values""",
"""output_hidden_states""",
"""output_attentions""",
"""use_cache""",
"""return_dict""",
] )
self.assertListEqual(arg_names[: len(__a )] , __a )
def lowerCAmelCase ( self : int ) -> int:
"""simple docstring"""
__lowercase , __lowercase : str = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase : int = True
__lowercase : Tuple = getattr(self.model_tester , """seq_length""" , __a )
__lowercase : Union[str, Any] = getattr(self.model_tester , """decoder_seq_length""" , __a )
__lowercase : List[str] = getattr(self.model_tester , """encoder_seq_length""" , __a )
__lowercase : List[Any] = getattr(self.model_tester , """d_model""" , __a )
__lowercase : Optional[int] = getattr(self.model_tester , """num_attention_heads""" , __a )
__lowercase : Any = d_model // num_attention_heads
for model_class in self.all_model_classes:
__lowercase : Dict = True
__lowercase : List[str] = False
__lowercase : Optional[int] = True
__lowercase : str = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
__lowercase : int = model(**self._prepare_for_class(__a , __a ) )
__lowercase : Any = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__lowercase : Optional[int] = True
__lowercase : List[str] = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
__lowercase : Union[str, Any] = model(**self._prepare_for_class(__a , __a ) )
__lowercase : Dict = outputs.encoder_attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
__lowercase : Tuple = len(__a )
__lowercase : str = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(__a , __a )
# decoder attentions
__lowercase : List[Any] = outputs.decoder_attentions
self.assertIsInstance(__a , (list, tuple) )
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
__lowercase : Optional[int] = outputs.cross_attentions
self.assertIsInstance(__a , (list, tuple) )
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
__lowercase : Tuple = True
__lowercase : Union[str, Any] = True
__lowercase : Tuple = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
__lowercase : Any = model(**self._prepare_for_class(__a , __a ) )
self.assertEqual(out_len + 2 , len(__a ) )
__lowercase : Optional[Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def lowerCAmelCase ( self : int ) -> Any:
"""simple docstring"""
super().test_retain_grad_hidden_states_attentions()
def snake_case_ ( lowerCAmelCase_ : Optional[int]="train-batch.pt" ):
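# Fetches a serialized batch of time-series inputs from the Hub test dataset
# repo and deserializes it with torch.load.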
__lowercase : Dict = hf_hub_download(repo_id="""hf-internal-testing/tourism-monthly-batch""" , filename=lowerCAmelCase_ , repo_type="""dataset""" )
__lowercase : Optional[int] = torch.load(lowerCAmelCase_ , map_location=lowerCAmelCase_ )
return batch
@require_torch
@slow
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
__lowercase : List[str] = AutoformerModel.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(__a )
__lowercase : List[Any] = prepare_batch()
with torch.no_grad():
__lowercase : Tuple = model(
past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , future_values=batch["""future_values"""] , future_time_features=batch["""future_time_features"""] , )[0]
__lowercase : List[str] = torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , __a )
__lowercase : Optional[int] = torch.tensor(
[[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] , device=__a )
self.assertTrue(torch.allclose(output[0, :3, :3] , __a , atol=__a ) )
def lowerCAmelCase ( self : str ) -> str:
"""simple docstring"""
__lowercase : int = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(__a )
__lowercase : List[str] = prepare_batch("""val-batch.pt""" )
with torch.no_grad():
__lowercase : Optional[Any] = model(
past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , ).encoder_last_hidden_state
__lowercase : List[str] = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , __a )
__lowercase : Optional[int] = torch.tensor(
[[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] , device=__a )
self.assertTrue(torch.allclose(output[0, :3, :3] , __a , atol=__a ) )
def lowerCAmelCase ( self : Dict ) -> List[Any]:
"""simple docstring"""
__lowercase : Optional[Any] = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(__a )
__lowercase : Optional[int] = prepare_batch("""val-batch.pt""" )
with torch.no_grad():
__lowercase : int = model.generate(
static_categorical_features=batch["""static_categorical_features"""] , past_time_features=batch["""past_time_features"""] , past_values=batch["""past_values"""] , future_time_features=batch["""future_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , )
__lowercase : int = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , __a )
__lowercase : Optional[Any] = torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=__a )
__lowercase : Dict = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , __a , rtol=1E-1 ) )
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bsa_available, logging, requires_backends
if is_bsa_available():
import bsa
from bsa import BeautifulSoup
lowerCamelCase : Any = logging.get_logger(__name__)
class lowerCAmelCase ( __a ):
'''simple docstring'''
def __init__( self : Any , **__a : List[str] ) -> List[Any]:
"""simple docstring"""
requires_backends(self , ["""bs4"""] )
super().__init__(**__a )
def lowerCAmelCase ( self : List[Any] , __a : List[str] ) -> Optional[Any]:
"""simple docstring"""
__lowercase : str = []
__lowercase : int = []
__lowercase : Optional[int] = element if element.name else element.parent
for parent in child.parents: # type: bs4.element.Tag
__lowercase : Optional[Any] = parent.find_all(child.name , recursive=__a )
xpath_tags.append(child.name )
xpath_subscripts.append(
0 if 1 == len(__a ) else next(i for i, s in enumerate(__a , 1 ) if s is child ) )
__lowercase : List[str] = parent
xpath_tags.reverse()
xpath_subscripts.reverse()
return xpath_tags, xpath_subscripts
def lowerCAmelCase ( self : int , __a : Dict ) -> List[Any]:
"""simple docstring"""
__lowercase : str = BeautifulSoup(__a , """html.parser""" )
__lowercase : Optional[int] = []
__lowercase : Optional[int] = []
__lowercase : str = []
for element in html_code.descendants:
if type(__a ) == bsa.element.NavigableString:
if type(element.parent ) != bsa.element.Tag:
continue
__lowercase : str = html.unescape(__a ).strip()
if not text_in_this_tag:
continue
all_doc_strings.append(__a )
__lowercase , __lowercase : str = self.xpath_soup(__a )
stringaxtag_seq.append(__a )
stringaxsubs_seq.append(__a )
if len(__a ) != len(__a ):
raise ValueError("""Number of doc strings and xtags does not correspond""" )
if len(__a ) != len(__a ):
raise ValueError("""Number of doc strings and xsubs does not correspond""" )
return all_doc_strings, stringaxtag_seq, stringaxsubs_seq
def lowerCAmelCase ( self : List[str] , __a : Tuple , __a : Any ) -> Tuple:
"""simple docstring"""
__lowercase : Optional[int] = """"""
for tagname, subs in zip(__a , __a ):
xpath += F"/{tagname}"
if subs != 0:
xpath += F"[{subs}]"
return xpath
def __call__( self : Optional[Any] , __a : List[str] ) -> BatchFeature:
"""simple docstring"""
__lowercase : List[str] = False
# Check that strings has a valid type
if isinstance(__a , __a ):
__lowercase : List[str] = True
elif isinstance(__a , (list, tuple) ):
if len(__a ) == 0 or isinstance(html_strings[0] , __a ):
__lowercase : Union[str, Any] = True
if not valid_strings:
raise ValueError(
"""HTML strings must of type `str`, `List[str]` (batch of examples), """
F"but is of type {type(__a )}." )
__lowercase : Union[str, Any] = bool(isinstance(__a , (list, tuple) ) and (isinstance(html_strings[0] , __a )) )
if not is_batched:
__lowercase : int = [html_strings]
# Get nodes + xpaths
__lowercase : Tuple = []
__lowercase : Any = []
for html_string in html_strings:
__lowercase , __lowercase , __lowercase : Dict = self.get_three_from_single(__a )
nodes.append(__a )
__lowercase : List[str] = []
for node, tag_list, sub_list in zip(__a , __a , __a ):
__lowercase : List[Any] = self.construct_xpath(__a , __a )
xpath_strings.append(__a )
xpaths.append(__a )
# return as Dict
__lowercase : str = {"""nodes""": nodes, """xpaths""": xpaths}
__lowercase : Optional[Any] = BatchFeature(data=__a , tensor_type=__a )
return encoded_inputs
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class lowerCAmelCase ( __a , __a ):
'''simple docstring'''
_A : str = 1
@register_to_config
def __init__( self : Optional[int] , __a : Tuple=2000 , __a : List[str]=0.1 , __a : str=20 , __a : Optional[int]=1E-3 ) -> int:
"""simple docstring"""
__lowercase : Tuple = None
__lowercase : Union[str, Any] = None
__lowercase : int = None
def lowerCAmelCase ( self : List[Any] , __a : Any , __a : Union[str, torch.device] = None ) -> str:
"""simple docstring"""
__lowercase : List[str] = torch.linspace(1 , self.config.sampling_eps , __a , device=__a )
def lowerCAmelCase ( self : Tuple , __a : List[Any] , __a : Tuple , __a : int , __a : Optional[int]=None ) -> str:
"""simple docstring"""
if self.timesteps is None:
raise ValueError(
"""`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" )
# TODO(Patrick) better comments + non-PyTorch
# postprocess model score
__lowercase : Dict = (
-0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
)
__lowercase : int = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
__lowercase : Union[str, Any] = std.flatten()
while len(std.shape ) < len(score.shape ):
__lowercase : Optional[Any] = std.unsqueeze(-1 )
__lowercase : List[Any] = -score / std
# compute the reverse-time SDE drift and diffusion terms
__lowercase : Dict = -1.0 / len(self.timesteps )
__lowercase : int = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
__lowercase : List[Any] = beta_t.flatten()
while len(beta_t.shape ) < len(x.shape ):
__lowercase : Union[str, Any] = beta_t.unsqueeze(-1 )
__lowercase : List[str] = -0.5 * beta_t * x
__lowercase : int = torch.sqrt(__a )
__lowercase : Union[str, Any] = drift - diffusion**2 * score
__lowercase : Optional[Any] = x + drift * dt
# add noise
__lowercase : List[str] = randn_tensor(x.shape , layout=x.layout , generator=__a , device=x.device , dtype=x.dtype )
__lowercase : str = x_mean + diffusion * math.sqrt(-dt ) * noise
return x, x_mean
def __len__( self : Tuple ) -> Optional[int]:
"""simple docstring"""
return self.config.num_train_timesteps
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
lowerCamelCase : Dict = logging.get_logger(__name__)
lowerCamelCase : List[Any] = {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096''': '''https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json'''
),
}
class lowerCAmelCase ( __a ):
'''simple docstring'''
_A : List[str] = '''longformer'''
def __init__( self : Union[str, Any] , __a : Union[List[int], int] = 512 , __a : int = 2 , __a : int = 1 , __a : int = 0 , __a : int = 2 , __a : int = 30522 , __a : int = 768 , __a : int = 12 , __a : int = 12 , __a : int = 3072 , __a : str = "gelu" , __a : float = 0.1 , __a : float = 0.1 , __a : int = 512 , __a : int = 2 , __a : float = 0.02 , __a : float = 1E-12 , __a : bool = False , **__a : Union[str, Any] , ) -> Dict:
"""simple docstring"""
super().__init__(pad_token_id=__a , **__a )
__lowercase : Tuple = attention_window
__lowercase : str = sep_token_id
__lowercase : Tuple = bos_token_id
__lowercase : Optional[int] = eos_token_id
__lowercase : Any = vocab_size
__lowercase : Any = hidden_size
__lowercase : Dict = num_hidden_layers
__lowercase : List[str] = num_attention_heads
__lowercase : Dict = hidden_act
__lowercase : Dict = intermediate_size
__lowercase : Dict = hidden_dropout_prob
__lowercase : Optional[Any] = attention_probs_dropout_prob
__lowercase : int = max_position_embeddings
__lowercase : List[str] = type_vocab_size
__lowercase : List[str] = initializer_range
__lowercase : Union[str, Any] = layer_norm_eps
__lowercase : List[Any] = onnx_export
class lowerCAmelCase ( __a ):
'''simple docstring'''
def __init__( self : Optional[Any] , __a : "PretrainedConfig" , __a : str = "default" , __a : "List[PatchingSpec]" = None ) -> Tuple:
"""simple docstring"""
super().__init__(__a , __a , __a )
__lowercase : Dict = True
@property
def lowerCAmelCase ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
__lowercase : List[Any] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
__lowercase : Any = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""global_attention_mask""", dynamic_axis),
] )
@property
def lowerCAmelCase ( self : str ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
__lowercase : Any = super().outputs
if self.task == "default":
__lowercase : Any = {0: """batch"""}
return outputs
@property
def lowerCAmelCase ( self : List[Any] ) -> float:
"""simple docstring"""
return 1E-4
@property
def lowerCAmelCase ( self : Optional[Any] ) -> int:
"""simple docstring"""
return max(super().default_onnx_opset , 14 )
def lowerCAmelCase ( self : Dict , __a : "PreTrainedTokenizerBase" , __a : int = -1 , __a : int = -1 , __a : bool = False , __a : Optional[TensorType] = None , ) -> Mapping[str, Any]:
"""simple docstring"""
__lowercase : Optional[int] = super().generate_dummy_inputs(
preprocessor=__a , batch_size=__a , seq_length=__a , is_pair=__a , framework=__a )
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
__lowercase : Optional[int] = torch.zeros_like(inputs["""input_ids"""] )
# make every second token global
__lowercase : str = 1
return inputs
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase ( __a , unittest.TestCase ):
'''simple docstring'''
_A : str = LongformerTokenizer
_A : int = True
_A : Optional[int] = LongformerTokenizerFast
_A : int = True
def lowerCAmelCase ( self : Any ) -> List[str]:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__lowercase : Union[str, Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
__lowercase : Union[str, Any] = dict(zip(__a , range(len(__a ) ) ) )
__lowercase : Any = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
__lowercase : Optional[int] = {"""unk_token""": """<unk>"""}
__lowercase : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__lowercase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__a ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(__a ) )
def lowerCAmelCase ( self : Optional[int] , **__a : Optional[Any] ) -> str:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__a )
def lowerCAmelCase ( self : Tuple , **__a : Tuple ) -> str:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__a )
def lowerCAmelCase ( self : str , __a : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = """lower newer"""
__lowercase : int = """lower newer"""
return input_text, output_text
def lowerCAmelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase : Union[str, Any] = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
__lowercase : Dict = """lower newer"""
__lowercase : Optional[Any] = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
__lowercase : str = tokenizer.tokenize(__a ) # , add_prefix_space=True)
self.assertListEqual(__a , __a )
__lowercase : int = tokens + [tokenizer.unk_token]
__lowercase : str = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , __a )
def lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : Optional[Any] = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("""Hello world!""" , add_special_tokens=__a ) , [0, 31414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode("""Hello world! cécé herlolip 418""" , add_special_tokens=__a ) , [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] , )
@slow
def lowerCAmelCase ( self : Tuple ) -> str:
"""simple docstring"""
__lowercase : Any = self.tokenizer_class.from_pretrained("""allenai/longformer-base-4096""" )
__lowercase : Optional[Any] = tokenizer.encode("""sequence builders""" , add_special_tokens=__a )
__lowercase : List[str] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=__a )
__lowercase : Optional[Any] = tokenizer.encode(
"""sequence builders""" , add_special_tokens=__a , add_prefix_space=__a )
__lowercase : Union[str, Any] = tokenizer.encode(
"""sequence builders""" , """multi-sequence build""" , add_special_tokens=__a , add_prefix_space=__a )
__lowercase : List[Any] = tokenizer.build_inputs_with_special_tokens(__a )
__lowercase : Any = tokenizer.build_inputs_with_special_tokens(__a , __a )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def lowerCAmelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
__lowercase : Optional[Any] = self.get_tokenizer()
__lowercase : Tuple = """Encode this sequence."""
__lowercase : Optional[Any] = tokenizer.byte_encoder[""" """.encode("""utf-8""" )[0]]
# Testing encoder arguments
__lowercase : Dict = tokenizer.encode(__a , add_special_tokens=__a , add_prefix_space=__a )
__lowercase : Tuple = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__a , __a )
__lowercase : List[str] = tokenizer.encode(__a , add_special_tokens=__a , add_prefix_space=__a )
__lowercase : Any = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__a , __a )
tokenizer.add_special_tokens({"""bos_token""": """<s>"""} )
__lowercase : str = tokenizer.encode(__a , add_special_tokens=__a )
__lowercase : Dict = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__a , __a )
# Testing spaces after special tokens
__lowercase : List[Any] = """<mask>"""
tokenizer.add_special_tokens(
{"""mask_token""": AddedToken(__a , lstrip=__a , rstrip=__a )} ) # mask token has a left space
__lowercase : Dict = tokenizer.convert_tokens_to_ids(__a )
__lowercase : List[str] = """Encode <mask> sequence"""
__lowercase : List[str] = """Encode <mask>sequence"""
__lowercase : Union[str, Any] = tokenizer.encode(__a )
__lowercase : Dict = encoded.index(__a )
__lowercase : List[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__a , __a )
__lowercase : int = tokenizer.encode(__a )
__lowercase : Union[str, Any] = encoded.index(__a )
__lowercase : List[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__a , __a )
def lowerCAmelCase ( self : int ) -> str:
"""simple docstring"""
pass
def lowerCAmelCase ( self : int ) -> Dict:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
__lowercase : List[str] = self.rust_tokenizer_class.from_pretrained(__a , **__a )
__lowercase : List[Any] = self.tokenizer_class.from_pretrained(__a , **__a )
__lowercase : Optional[Any] = """A, <mask> AllenNLP sentence."""
__lowercase : Union[str, Any] = tokenizer_r.encode_plus(__a , add_special_tokens=__a , return_token_type_ids=__a )
__lowercase : Optional[Any] = tokenizer_p.encode_plus(__a , add_special_tokens=__a , return_token_type_ids=__a )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
__lowercase : Dict = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
__lowercase : str = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
# Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
__a , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
__a , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
def lowerCAmelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
__lowercase : Dict = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : Optional[int] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
__lowercase : Any = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] , __a )
self.assertEqual(post_processor_state["""add_prefix_space"""] , __a )
self.assertEqual(post_processor_state["""trim_offsets"""] , __a )
def lowerCAmelCase ( self : int ) -> Tuple:
"""simple docstring"""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
                text_of_1_token = """hello"""  # `hello` is a token in the vocabulary of `pretrained_name`
                text = F"{text_of_1_token} {text_of_1_token}"
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=True , trim_offsets=True )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token)) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=True )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token)) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=True , trim_offsets=False )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token)) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=False )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token)) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)) , )
                text = F" {text}"
                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=True )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(text_of_1_token)) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=True , trim_offsets=False )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(text_of_1_token)) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=False )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(text_of_1_token)) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)) , )
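
# Hedged sketch (added for illustration, not part of the original test): the
# offset arithmetic the assertions above encode, for the two-word input
# "hello hello" and a 5-character token. Pure Python, no tokenizer required.
def _expected_offsets(token_len: int , trim_offsets: bool ):
    first = (0, token_len)
    second = (token_len + 1, 2 * token_len + 1) if trim_offsets else (token_len, 2 * token_len + 1)
    return [first, second]

assert _expected_offsets(5 , trim_offsets=True ) == [(0, 5), (6, 11)]   # trimming drops the separating space
assert _expected_offsets(5 , trim_offsets=False ) == [(0, 5), (5, 11)]  # span keeps the leading space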
| 649
| 1
|
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser ( subparsers=None ):
    if subparsers is not None:
        parser = subparsers.add_parser("""env""" )
    else:
        parser = argparse.ArgumentParser("""Accelerate env command""" )
    parser.add_argument(
        """--config_file""" , default=None , help="""The config file to use for the default values in the launching script.""" )
    if subparsers is not None:
        parser.set_defaults(func=env_command )
    return parser
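
# Illustrative wiring sketch (added, not in the original file): how the parser
# above is typically registered under a root CLI; the `prog` name is assumed.
def _example_cli():
    root = argparse.ArgumentParser(prog="""accelerate""" )
    subcommands = root.add_subparsers()
    env_command_parser(subparsers=subcommands )  # registers the `env` subcommand
    return root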
def env_command ( args ):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()
    accelerate_config = """Not found"""
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file ):
        accelerate_config = load_config_from_file(args.config_file ).to_dict()
    info = {
        """`Accelerate` version""": version,
        """Platform""": platform.platform(),
        """Python version""": platform.python_version(),
        """Numpy version""": np.__version__,
        """PyTorch version (GPU?)""": F"{pt_version} ({pt_cuda_available})",
        """PyTorch XPU available""": str(pt_xpu_available ),
        """PyTorch NPU available""": str(pt_npu_available ),
        """System RAM""": F"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
    }
    if pt_cuda_available:
        info["""GPU type"""] = torch.cuda.get_device_name()
    print("""\nCopy-and-paste the text below in your GitHub issue\n""" )
    print("""\n""".join([F"- {prop}: {val}" for prop, val in info.items()] ) )
    print("""- `Accelerate` default config:""" if args.config_file is None else """- `Accelerate` config passed:""" )
    accelerate_config_str = (
        """\n""".join([F"\t- {prop}: {val}" for prop, val in accelerate_config.items()] )
        if isinstance(accelerate_config , dict )
        else F"\t{accelerate_config}"
    )
    print(accelerate_config_str )
    info["""`Accelerate` configs"""] = accelerate_config
    return info
def main ():
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args )
    return 0
if __name__ == "__main__":
raise SystemExit(main())
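
# Example of the kind of report `accelerate env` prints (values below are
# illustrative placeholders, not real output):
#   - `Accelerate` version: 0.20.0
#   - Platform: Linux-5.15.0-x86_64-with-glibc2.31
#   - Python version: 3.10.6
#   - PyTorch version (GPU?): 2.0.1 (True)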
| 649
|
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : Union[str, Any] , __a : Dict , __a : Union[str, Any]=13 , __a : Dict=7 , __a : Dict=True , __a : Dict=True , __a : Any=True , __a : List[str]=True , __a : int=99 , __a : Optional[int]=32 , __a : str=2 , __a : int=4 , __a : List[str]=37 , __a : Union[str, Any]="gelu" , __a : Union[str, Any]=0.1 , __a : Union[str, Any]=0.1 , __a : List[Any]=512 , __a : int=16 , __a : Union[str, Any]=2 , __a : Union[str, Any]=0.02 , __a : List[str]=3 , __a : Dict=4 , __a : Optional[Any]=None , ) -> Optional[Any]:
"""simple docstring"""
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = """gelu"""
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
def lowerCAmelCase ( self : str ) -> List[Any]:
"""simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = ConvBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=True , )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase ( self : Dict , __a : List[Any] , __a : List[str] , __a : Union[str, Any] , __a : str , __a : Union[str, Any] , __a : Tuple , __a : Tuple ) -> Dict:
"""simple docstring"""
__lowercase : Dict = TFConvBertModel(config=__a )
__lowercase : Tuple = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
__lowercase : Any = [input_ids, input_mask]
__lowercase : Dict = model(__a )
__lowercase : str = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase ( self : Tuple , __a : Union[str, Any] , __a : Any , __a : Tuple , __a : Union[str, Any] , __a : str , __a : Dict , __a : str ) -> Dict:
"""simple docstring"""
__lowercase : Optional[int] = TFConvBertForMaskedLM(config=__a )
__lowercase : List[Any] = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
__lowercase : Any = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase ( self : Optional[int] , __a : int , __a : Any , __a : Optional[int] , __a : int , __a : int , __a : List[Any] , __a : Optional[int] ) -> List[Any]:
"""simple docstring"""
__lowercase : str = self.num_labels
__lowercase : List[Any] = TFConvBertForSequenceClassification(config=__a )
__lowercase : int = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
__lowercase : List[str] = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase ( self : Optional[int] , __a : Any , __a : Optional[Any] , __a : int , __a : Optional[int] , __a : Tuple , __a : int , __a : int ) -> Dict:
"""simple docstring"""
__lowercase : Tuple = self.num_choices
__lowercase : Dict = TFConvBertForMultipleChoice(config=__a )
__lowercase : List[str] = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
__lowercase : int = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
__lowercase : str = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
__lowercase : str = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
__lowercase : Dict = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCAmelCase ( self : List[str] , __a : str , __a : List[str] , __a : List[str] , __a : List[str] , __a : Any , __a : Tuple , __a : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase : Tuple = self.num_labels
__lowercase : Tuple = TFConvBertForTokenClassification(config=__a )
__lowercase : Dict = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
__lowercase : str = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase ( self : List[Any] , __a : Optional[int] , __a : List[str] , __a : Optional[Any] , __a : int , __a : Tuple , __a : Any , __a : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : Any = TFConvBertForQuestionAnswering(config=__a )
__lowercase : str = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
__lowercase : List[Any] = model(__a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase ( self : Tuple ) -> Tuple:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_tf
class lowerCAmelCase ( __a , __a , unittest.TestCase ):
'''simple docstring'''
_A : Dict = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
_A : str = (
{
'''feature-extraction''': TFConvBertModel,
'''fill-mask''': TFConvBertForMaskedLM,
'''question-answering''': TFConvBertForQuestionAnswering,
'''text-classification''': TFConvBertForSequenceClassification,
'''token-classification''': TFConvBertForTokenClassification,
'''zero-shot''': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
_A : Union[str, Any] = False
_A : List[str] = False
_A : Dict = False
def lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
__lowercase : int = TFConvBertModelTester(self )
__lowercase : Tuple = ConfigTester(self , config_class=__a , hidden_size=37 )
def lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def lowerCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__a )
def lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__a )
def lowerCAmelCase ( self : str ) -> Any:
"""simple docstring"""
__lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__a )
def lowerCAmelCase ( self : str ) -> str:
"""simple docstring"""
__lowercase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__a )
def lowerCAmelCase ( self : str ) -> Any:
"""simple docstring"""
__lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__a )
@slow
def lowerCAmelCase ( self : str ) -> Any:
"""simple docstring"""
__lowercase , __lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase : Union[str, Any] = True
__lowercase : List[Any] = True
if hasattr(__a , """use_cache""" ):
__lowercase : Optional[Any] = True
__lowercase : List[str] = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
__lowercase : int = getattr(self.model_tester , """key_length""" , __a )
for model_class in self.all_model_classes:
__lowercase : Optional[Any] = self._prepare_for_class(__a , __a )
__lowercase : Tuple = model_class(__a )
__lowercase : Tuple = len(model(__a ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__a , saved_model=__a )
__lowercase : List[Any] = os.path.join(__a , """saved_model""" , """1""" )
__lowercase : str = tf.keras.models.load_model(__a )
__lowercase : Optional[int] = model(__a )
if self.is_encoder_decoder:
__lowercase : Union[str, Any] = outputs["""encoder_hidden_states"""]
__lowercase : Union[str, Any] = outputs["""encoder_attentions"""]
else:
__lowercase : Union[str, Any] = outputs["""hidden_states"""]
__lowercase : List[str] = outputs["""attentions"""]
self.assertEqual(len(__a ) , __a )
__lowercase : List[Any] = getattr(
self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(__a ) , __a )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : str = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" )
self.assertIsNotNone(__a )
def lowerCAmelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
__lowercase , __lowercase : int = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase : List[str] = True
__lowercase : List[Any] = getattr(self.model_tester , """decoder_seq_length""" , self.model_tester.seq_length )
__lowercase : Optional[int] = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
__lowercase : List[str] = getattr(self.model_tester , """key_length""" , __a )
__lowercase : List[Any] = getattr(self.model_tester , """key_length""" , __a )
def check_decoder_attentions_output(__a : List[str] ):
__lowercase : Union[str, Any] = len(__a )
self.assertEqual(out_len % 2 , 0 )
__lowercase : Any = outputs.decoder_attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(__a : str ):
__lowercase : str = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
__lowercase : int = True
__lowercase : Any = False
__lowercase : List[Any] = model_class(__a )
__lowercase : Tuple = model(self._prepare_for_class(__a , __a ) )
__lowercase : Dict = len(__a )
self.assertEqual(config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
if self.is_encoder_decoder:
__lowercase : Any = model_class(__a )
__lowercase : List[str] = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(config.output_hidden_states , __a )
check_decoder_attentions_output(__a )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
__lowercase : Dict = True
__lowercase : Optional[Any] = model_class(__a )
__lowercase : Optional[int] = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
# Check attention is always last and order is fine
__lowercase : List[str] = True
__lowercase : List[Any] = True
__lowercase : Any = model_class(__a )
__lowercase : Optional[int] = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__a ) )
self.assertEqual(model.config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
@require_tf
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : List[str] = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" )
__lowercase : str = tf.constant([[0, 1, 2, 3, 4, 5]] )
__lowercase : Tuple = model(__a )[0]
__lowercase : Any = [1, 6, 768]
self.assertEqual(output.shape , __a )
__lowercase : Optional[Any] = tf.constant(
[
[
[-0.03475493, -0.4686034, -0.30638832],
[0.22637248, -0.26988646, -0.7423424],
[0.10324868, -0.45013508, -0.58280784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __a , atol=1E-4 )
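
# Hedged usage sketch (added): running the same checkpoint outside the test
# harness. Requires TensorFlow and network access, so it is left as comments.
#   model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
#   input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
#   hidden_states = model(input_ids)[0]   # shape (1, 6, 768), sliced and checked above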
| 649
| 1
|
def create_ngram ( sentence : str , ngram_size : int ):
    return [sentence[i : i + ngram_size] for i in range(len(sentence ) - ngram_size + 1 )]
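
# Illustrative examples (added): character n-grams of a short string.
assert create_ngram("""banana""" , 3 ) == ["""ban""", """ana""", """nan""", """ana"""]
assert create_ngram("""abc""" , 5 ) == []  # input shorter than the window -> no n-grams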
if __name__ == "__main__":
from doctest import testmod
testmod()
| 649
|
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
class lowerCAmelCase ( __a ):
'''simple docstring'''
def __init__( self : int , *__a : Dict , **__a : Optional[Any] ) -> None:
"""simple docstring"""
        warnings.warn(
            """The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use BeitImageProcessor instead.""" , FutureWarning , )
super().__init__(*__a , **__a )
| 649
| 1
|
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase : Optional[int] = logging.get_logger(__name__)
model_classes = {
    '''b0''': efficientnet.EfficientNetB0,
    '''b1''': efficientnet.EfficientNetB1,
    '''b2''': efficientnet.EfficientNetB2,
    '''b3''': efficientnet.EfficientNetB3,
    '''b4''': efficientnet.EfficientNetB4,
    '''b5''': efficientnet.EfficientNetB5,
    '''b6''': efficientnet.EfficientNetB6,
    '''b7''': efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
    '''b0''': {
        '''hidden_dim''': 1280,
        '''width_coef''': 1.0,
        '''depth_coef''': 1.0,
        '''image_size''': 224,
        '''dropout_rate''': 0.2,
        '''dw_padding''': [],
    },
    '''b1''': {
        '''hidden_dim''': 1280,
        '''width_coef''': 1.0,
        '''depth_coef''': 1.1,
        '''image_size''': 240,
        '''dropout_rate''': 0.2,
        '''dw_padding''': [16],
    },
    '''b2''': {
        '''hidden_dim''': 1408,
        '''width_coef''': 1.1,
        '''depth_coef''': 1.2,
        '''image_size''': 260,
        '''dropout_rate''': 0.3,
        '''dw_padding''': [5, 8, 16],
    },
    '''b3''': {
        '''hidden_dim''': 1536,
        '''width_coef''': 1.2,
        '''depth_coef''': 1.4,
        '''image_size''': 300,
        '''dropout_rate''': 0.3,
        '''dw_padding''': [5, 18],
    },
    '''b4''': {
        '''hidden_dim''': 1792,
        '''width_coef''': 1.4,
        '''depth_coef''': 1.8,
        '''image_size''': 380,
        '''dropout_rate''': 0.4,
        '''dw_padding''': [6],
    },
    '''b5''': {
        '''hidden_dim''': 2048,
        '''width_coef''': 1.6,
        '''depth_coef''': 2.2,
        '''image_size''': 456,
        '''dropout_rate''': 0.4,
        '''dw_padding''': [13, 27],
    },
    '''b6''': {
        '''hidden_dim''': 2304,
        '''width_coef''': 1.8,
        '''depth_coef''': 2.6,
        '''image_size''': 528,
        '''dropout_rate''': 0.5,
        '''dw_padding''': [31],
    },
    '''b7''': {
        '''hidden_dim''': 2560,
        '''width_coef''': 2.0,
        '''depth_coef''': 3.1,
        '''image_size''': 600,
        '''dropout_rate''': 0.5,
        '''dw_padding''': [18],
    },
}
def get_efficientnet_config ( model_name ):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["""hidden_dim"""]
    config.width_coefficient = CONFIG_MAP[model_name]["""width_coef"""]
    config.depth_coefficient = CONFIG_MAP[model_name]["""depth_coef"""]
    config.image_size = CONFIG_MAP[model_name]["""image_size"""]
    config.dropout_rate = CONFIG_MAP[model_name]["""dropout_rate"""]
    config.depthwise_padding = CONFIG_MAP[model_name]["""dw_padding"""]
    repo_id = """huggingface/label-files"""
    filename = """imagenet-1k-id2label.json"""
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
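
# Quick sanity sketch (added, illustrative; fetching the label file needs
# network access, so it is left as comments):
#   cfg = get_efficientnet_config("b0")
#   (cfg.image_size, cfg.width_coefficient, cfg.depth_coefficient)  # -> (224, 1.0, 1.0)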
def prepare_img ():
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
def convert_image_processor ( model_name ):
    size = CONFIG_MAP[model_name]["""image_size"""]
    preprocessor = EfficientNetImageProcessor(
        size={"""height""": size, """width""": size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.47853944, 0.4732864, 0.47434163] , do_center_crop=False , )
    return preprocessor
def rename_keys ( original_param_names ):
    block_names = [v.split("""_""" )[0].split("""block""" )[1] for v in original_param_names if v.startswith("""block""" )]
    block_names = sorted(set(block_names ) )
    num_blocks = len(block_names )
    block_name_mapping = {b: str(i ) for b, i in zip(block_names , range(num_blocks ) )}
    rename_keys = []
rename_keys.append(("""stem_conv/kernel:0""", """embeddings.convolution.weight""") )
rename_keys.append(("""stem_bn/gamma:0""", """embeddings.batchnorm.weight""") )
rename_keys.append(("""stem_bn/beta:0""", """embeddings.batchnorm.bias""") )
rename_keys.append(("""stem_bn/moving_mean:0""", """embeddings.batchnorm.running_mean""") )
rename_keys.append(("""stem_bn/moving_variance:0""", """embeddings.batchnorm.running_var""") )
for b in block_names:
__lowercase : List[Any] = block_name_mapping[b]
rename_keys.append((F"block{b}_expand_conv/kernel:0", F"encoder.blocks.{hf_b}.expansion.expand_conv.weight") )
rename_keys.append((F"block{b}_expand_bn/gamma:0", F"encoder.blocks.{hf_b}.expansion.expand_bn.weight") )
rename_keys.append((F"block{b}_expand_bn/beta:0", F"encoder.blocks.{hf_b}.expansion.expand_bn.bias") )
rename_keys.append(
(F"block{b}_expand_bn/moving_mean:0", F"encoder.blocks.{hf_b}.expansion.expand_bn.running_mean") )
rename_keys.append(
(F"block{b}_expand_bn/moving_variance:0", F"encoder.blocks.{hf_b}.expansion.expand_bn.running_var") )
rename_keys.append(
(F"block{b}_dwconv/depthwise_kernel:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight") )
rename_keys.append((F"block{b}_bn/gamma:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight") )
rename_keys.append((F"block{b}_bn/beta:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias") )
rename_keys.append(
(F"block{b}_bn/moving_mean:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean") )
rename_keys.append(
(F"block{b}_bn/moving_variance:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var") )
rename_keys.append((F"block{b}_se_reduce/kernel:0", F"encoder.blocks.{hf_b}.squeeze_excite.reduce.weight") )
rename_keys.append((F"block{b}_se_reduce/bias:0", F"encoder.blocks.{hf_b}.squeeze_excite.reduce.bias") )
rename_keys.append((F"block{b}_se_expand/kernel:0", F"encoder.blocks.{hf_b}.squeeze_excite.expand.weight") )
rename_keys.append((F"block{b}_se_expand/bias:0", F"encoder.blocks.{hf_b}.squeeze_excite.expand.bias") )
rename_keys.append(
(F"block{b}_project_conv/kernel:0", F"encoder.blocks.{hf_b}.projection.project_conv.weight") )
rename_keys.append((F"block{b}_project_bn/gamma:0", F"encoder.blocks.{hf_b}.projection.project_bn.weight") )
rename_keys.append((F"block{b}_project_bn/beta:0", F"encoder.blocks.{hf_b}.projection.project_bn.bias") )
rename_keys.append(
(F"block{b}_project_bn/moving_mean:0", F"encoder.blocks.{hf_b}.projection.project_bn.running_mean") )
rename_keys.append(
(F"block{b}_project_bn/moving_variance:0", F"encoder.blocks.{hf_b}.projection.project_bn.running_var") )
rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") )
rename_keys.append(("""top_bn/gamma:0""", """encoder.top_bn.weight""") )
rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") )
rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") )
rename_keys.append(("""top_bn/moving_variance:0""", """encoder.top_bn.running_var""") )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = """efficientnet.""" + item[1]
    key_mapping["""predictions/kernel:0"""] = """classifier.weight"""
    key_mapping["""predictions/bias:0"""] = """classifier.bias"""
    return key_mapping
def replace_params ( hf_params , tf_params , key_mapping ):
    for key, value in tf_params.items():
        if """normalization""" in key:
            continue
        hf_key = key_mapping[key]
        if """_conv""" in key and """kernel""" in key:
            new_hf_value = torch.from_numpy(value ).permute(3 , 2 , 0 , 1 )
        elif """depthwise_kernel""" in key:
            new_hf_value = torch.from_numpy(value ).permute(2 , 3 , 0 , 1 )
        elif """kernel""" in key:
            new_hf_value = torch.from_numpy(np.transpose(value ) )
        else:
            new_hf_value = torch.from_numpy(value )
        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value )
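
# Minimal layout sketch (added for illustration): the permutations above map
# TF's HWIO conv kernels into PyTorch's OIHW layout; shapes only, no real weights.
def _demo_kernel_layouts():
    k = np.zeros((3, 3, 16, 32) )  # (H, W, in_channels, out_channels) in TF
    conv = torch.from_numpy(k ).permute(3 , 2 , 0 , 1 )  # -> (32, 16, 3, 3) for nn.Conv2d
    dw = torch.from_numpy(k ).permute(2 , 3 , 0 , 1 )    # -> (16, 32, 3, 3) for depthwise kernels
    return conv.shape, dw.shape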
@torch.no_grad()
def convert_efficientnet_checkpoint ( model_name , pytorch_dump_folder_path , save_model , push_to_hub ):
    original_model = model_classes[model_name](
        include_top=True , weights="""imagenet""" , input_tensor=None , input_shape=None , pooling=None , classes=1000 , classifier_activation="""softmax""" , )
    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys() )
    # Load HuggingFace model
    config = get_efficientnet_config(model_name )
    hf_model = EfficientNetForImageClassification(config ).eval()
    hf_params = hf_model.state_dict()
    # Create src-to-dst parameter name mapping dictionary
    print("""Converting parameters...""" )
    key_mapping = rename_keys(tf_param_names )
    replace_params(hf_params , tf_params , key_mapping )
    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name )
    inputs = preprocessor(images=prepare_img() , return_tensors="""pt""" )
    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs )
    hf_logits = outputs.logits.detach().numpy()
    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["""image_size"""]
    img = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
    x = image.img_to_array(img )
    x = np.expand_dims(x , axis=0 )
    original_logits = original_model.predict(x )
    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits , hf_logits , atol=1e-3 ), "The predicted logits are not the same."
    print("""Model outputs match!""" )
    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path ):
            os.mkdir(pytorch_dump_folder_path )
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path )
        preprocessor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        # Push model and image processor to hub
        print(F"Pushing converted {model_name} to the hub..." )
        model_name = F"efficientnet-{model_name}"
        preprocessor.push_to_hub(model_name )
        hf_model.push_to_hub(model_name )
if __name__ == "__main__":
lowerCamelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''b0''',
type=str,
help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''hf_model''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''')
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
lowerCamelCase : Any = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
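
# Example invocation (added, illustrative; the script filename is assumed):
#   python convert_efficientnet_to_pytorch.py --model_name b0 \
#       --pytorch_dump_folder_path hf_model --save_model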
| 649
|
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
__lowercase : Optional[int] = """| <pad> <unk> <s> </s> a b c d e f g h i j k""".split()
__lowercase : List[str] = dict(zip(__a , range(len(__a ) ) ) )
__lowercase : Dict = {
"""unk_token""": """<unk>""",
"""bos_token""": """<s>""",
"""eos_token""": """</s>""",
}
__lowercase : List[str] = {
"""feature_size""": 1,
"""padding_value""": 0.0,
"""sampling_rate""": 16000,
"""return_attention_mask""": False,
"""do_normalize""": True,
}
__lowercase : Tuple = tempfile.mkdtemp()
__lowercase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__lowercase : str = os.path.join(self.tmpdirname , __a )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__a ) + """\n""" )
with open(self.feature_extraction_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__a ) + """\n""" )
# load decoder from hub
__lowercase : Optional[int] = """hf-internal-testing/ngram-beam-search-decoder"""
def lowerCAmelCase ( self : Optional[Any] , **__a : Dict ) -> Tuple:
"""simple docstring"""
__lowercase : Union[str, Any] = self.add_kwargs_tokens_map.copy()
kwargs.update(__a )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **__a )
def lowerCAmelCase ( self : str , **__a : int ) -> Tuple:
"""simple docstring"""
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **__a )
def lowerCAmelCase ( self : Union[str, Any] , **__a : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **__a )
def lowerCAmelCase ( self : int ) -> Tuple:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def lowerCAmelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
__lowercase : Optional[Any] = self.get_tokenizer()
__lowercase : Any = self.get_feature_extractor()
__lowercase : str = self.get_decoder()
__lowercase : Tuple = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
processor.save_pretrained(self.tmpdirname )
__lowercase : Tuple = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , __a )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __a )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , __a )
def lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Any = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
__lowercase : str = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def lowerCAmelCase ( self : Any ) -> List[str]:
"""simple docstring"""
__lowercase : List[str] = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(["""xx"""] )
with self.assertRaisesRegex(__a , """include""" ):
WavaVecaProcessorWithLM(
tokenizer=__a , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def lowerCAmelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__lowercase : List[Any] = self.get_feature_extractor()
__lowercase : Union[str, Any] = self.get_tokenizer()
__lowercase : int = self.get_decoder()
__lowercase : int = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
__lowercase : Optional[int] = floats_list((3, 1000) )
__lowercase : List[Any] = feature_extractor(__a , return_tensors="""np""" )
__lowercase : List[str] = processor(__a , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase : List[Any] = self.get_feature_extractor()
__lowercase : int = self.get_tokenizer()
__lowercase : Dict = self.get_decoder()
__lowercase : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
__lowercase : Dict = """This is a test string"""
__lowercase : Any = processor(text=__a )
__lowercase : Dict = tokenizer(__a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCAmelCase ( self : str , __a : Tuple=(2, 10, 16) , __a : int=77 ) -> Optional[Any]:
"""simple docstring"""
np.random.seed(__a )
return np.random.rand(*__a )
def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : List[str] = self.get_feature_extractor()
__lowercase : Dict = self.get_tokenizer()
__lowercase : str = self.get_decoder()
__lowercase : int = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
__lowercase : List[str] = self._get_dummy_logits(shape=(10, 16) , seed=13 )
__lowercase : Optional[Any] = processor.decode(__a )
__lowercase : Any = decoder.decode_beams(__a )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual("""</s> <s> </s>""" , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ["""fork"""], ["""spawn"""]] )
def lowerCAmelCase ( self : List[str] , __a : Dict ) -> List[Any]:
"""simple docstring"""
__lowercase : str = self.get_feature_extractor()
__lowercase : Dict = self.get_tokenizer()
__lowercase : Optional[int] = self.get_decoder()
__lowercase : Any = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
__lowercase : Optional[Any] = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
__lowercase : Union[str, Any] = processor.batch_decode(__a )
else:
with get_context(__a ).Pool() as pool:
__lowercase : Optional[Any] = processor.batch_decode(__a , __a )
__lowercase : Union[str, Any] = list(__a )
with get_context("""fork""" ).Pool() as p:
__lowercase : Optional[Any] = decoder.decode_beams_batch(__a , __a )
        texts_decoder, logit_scores_decoder, lm_scores_decoder = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(__a , decoded_processor.text )
self.assertListEqual(["""<s> <s> </s>""", """<s> <s> <s>"""] , decoded_processor.text )
self.assertListEqual(__a , decoded_processor.logit_score )
self.assertListEqual(__a , decoded_processor.lm_score )
def lowerCAmelCase ( self : Any ) -> Optional[int]:
"""simple docstring"""
__lowercase : int = self.get_feature_extractor()
__lowercase : Dict = self.get_tokenizer()
__lowercase : List[str] = self.get_decoder()
__lowercase : int = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
__lowercase : Dict = self._get_dummy_logits()
__lowercase : Tuple = 15
__lowercase : Tuple = -20.0
__lowercase : Dict = -4.0
__lowercase : Dict = processor.batch_decode(
__a , beam_width=__a , beam_prune_logp=__a , token_min_logp=__a , )
__lowercase : Tuple = decoded_processor_out.text
__lowercase : List[Any] = list(__a )
with get_context("""fork""" ).Pool() as pool:
__lowercase : Any = decoder.decode_beams_batch(
__a , __a , beam_width=__a , beam_prune_logp=__a , token_min_logp=__a , )
__lowercase : Optional[Any] = [d[0][0] for d in decoded_decoder_out]
__lowercase : Optional[int] = [d[0][2] for d in decoded_decoder_out]
__lowercase : Optional[int] = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(__a , __a )
self.assertListEqual(["""</s> <s> <s>""", """<s> <s> <s>"""] , __a )
self.assertTrue(np.array_equal(__a , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , __a , atol=1E-3 ) )
self.assertTrue(np.array_equal(__a , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9474] , __a , atol=1E-3 ) )
def lowerCAmelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
__lowercase : str = self.get_feature_extractor()
__lowercase : List[Any] = self.get_tokenizer()
__lowercase : List[Any] = self.get_decoder()
__lowercase : Dict = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
__lowercase : List[Any] = self._get_dummy_logits()
__lowercase : Optional[int] = 2.0
__lowercase : Tuple = 5.0
__lowercase : Optional[Any] = -20.0
__lowercase : Tuple = True
__lowercase : Union[str, Any] = processor.batch_decode(
__a , alpha=__a , beta=__a , unk_score_offset=__a , lm_score_boundary=__a , )
__lowercase : Any = decoded_processor_out.text
__lowercase : List[Any] = list(__a )
decoder.reset_params(
alpha=__a , beta=__a , unk_score_offset=__a , lm_score_boundary=__a , )
with get_context("""fork""" ).Pool() as pool:
__lowercase : Tuple = decoder.decode_beams_batch(
__a , __a , )
__lowercase : int = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(__a , __a )
self.assertListEqual(["""<s> </s> <s> </s> </s>""", """</s> </s> <s> </s> </s>"""] , __a )
__lowercase : str = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , __a )
def lowerCAmelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__lowercase : Optional[Any] = processor.decoder.model_container[processor.decoder._model_key]
__lowercase : str = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
__lowercase : int = os.listdir(__a )
__lowercase : Optional[Any] = ["""alphabet.json""", """language_model"""]
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(__a , __a )
def lowerCAmelCase ( self : str ) -> Optional[int]:
"""simple docstring"""
__lowercase : List[str] = snapshot_download("""hf-internal-testing/processor_with_lm""" )
__lowercase : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained(__a )
__lowercase : Dict = processor.decoder.model_container[processor.decoder._model_key]
__lowercase : List[Any] = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
__lowercase : Dict = os.listdir(__a )
__lowercase : List[Any] = os.listdir(__a )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(__a , __a )
def lowerCAmelCase ( self : Tuple ) -> int:
"""simple docstring"""
__lowercase : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__lowercase : Dict = AutoProcessor.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__lowercase : Any = floats_list((3, 1000) )
__lowercase : List[str] = processor_wavaveca(__a , return_tensors="""np""" )
__lowercase : List[Any] = processor_auto(__a , return_tensors="""np""" )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
__lowercase : List[str] = self._get_dummy_logits()
__lowercase : List[str] = processor_wavaveca.batch_decode(__a )
__lowercase : Optional[int] = processor_auto.batch_decode(__a )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Any = self.get_feature_extractor()
__lowercase : Union[str, Any] = self.get_tokenizer()
__lowercase : Dict = self.get_decoder()
__lowercase : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
@staticmethod
def lowerCAmelCase ( __a : Union[str, Any] , __a : List[Any] ) -> Dict:
"""simple docstring"""
__lowercase : Any = [d[key] for d in offsets]
return retrieved_list
def lowerCAmelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
__lowercase : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__lowercase : Optional[Any] = self._get_dummy_logits()[0]
__lowercase : Dict = processor.decode(__a , output_word_offsets=__a )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(__a , __a ) )
self.assertEqual(""" """.join(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """end_offset""" ) , [1, 3, 5] )
def lowerCAmelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
__lowercase : List[str] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__lowercase : Any = self._get_dummy_logits()
__lowercase : Dict = processor.batch_decode(__a , output_word_offsets=__a )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(__a , __a ) )
self.assertListEqual(
[""" """.join(self.get_from_offsets(__a , """word""" ) ) for o in outputs["""word_offsets"""]] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """end_offset""" ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def lowerCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
import torch
        ds = load_dataset("""common_voice""" , """en""" , split="""train""" , streaming=True )
        ds = ds.cast_column("""audio""" , datasets.Audio(sampling_rate=16000 ) )
        ds_iter = iter(ds )
        sample = next(ds_iter )
__lowercase : int = AutoProcessor.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
__lowercase : int = WavaVecaForCTC.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
__lowercase : Union[str, Any] = processor(sample["""audio"""]["""array"""] , return_tensors="""pt""" ).input_values
with torch.no_grad():
__lowercase : List[Any] = model(__a ).logits.cpu().numpy()
__lowercase : Tuple = processor.decode(logits[0] , output_word_offsets=__a )
__lowercase : int = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
__lowercase : Optional[Any] = [
{
"""start_time""": d["""start_offset"""] * time_offset,
"""end_time""": d["""end_offset"""] * time_offset,
"""word""": d["""word"""],
}
for d in output["""word_offsets"""]
]
__lowercase : str = """WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"""
# output words
self.assertEqual(""" """.join(self.get_from_offsets(__a , """word""" ) ) , __a )
self.assertEqual(""" """.join(self.get_from_offsets(__a , """word""" ) ) , output.text )
# output times
__lowercase : Tuple = torch.tensor(self.get_from_offsets(__a , """start_time""" ) )
__lowercase : Dict = torch.tensor(self.get_from_offsets(__a , """end_time""" ) )
# fmt: off
__lowercase : List[Any] = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
__lowercase : Optional[int] = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(__a , __a , atol=0.01 ) )
self.assertTrue(torch.allclose(__a , __a , atol=0.01 ) )
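
# Illustrative recap (added): the word offsets above are converted to seconds via
#   time_offset = model.config.inputs_to_logits_ratio / sampling_rate
# e.g. 320 / 16000 = 0.02 s per logit frame for the Wav2Vec2 base checkpoints.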
| 649
| 1
|
def sum_digits ( num : int ) -> int:
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum
def solution ( max_n : int = 100 ) -> int:
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2 , max_n + 1 ):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator )
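
# Sanity check (added, illustrative): Project Euler 65 states the 10th
# convergent of e is 1457/536, and 1 + 4 + 5 + 7 = 17.
assert solution(10 ) == 17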
if __name__ == "__main__":
print(f'''{solution() = }''')
| 649
|
def and_gate ( input_1 : int , input_2 : int ) -> int:
    return int((input_1, input_2).count(0 ) == 0 )
def test_and_gate ( ) -> None:
    assert and_gate(0 , 0 ) == 0
    assert and_gate(0 , 1 ) == 0
    assert and_gate(1 , 0 ) == 0
    assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
| 649
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    '''configuration_roc_bert''': ['''ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RoCBertConfig'''],
    '''tokenization_roc_bert''': ['''RoCBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_roc_bert'''] = [
'''ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RoCBertForCausalLM''',
'''RoCBertForMaskedLM''',
'''RoCBertForMultipleChoice''',
'''RoCBertForPreTraining''',
'''RoCBertForQuestionAnswering''',
'''RoCBertForSequenceClassification''',
'''RoCBertForTokenClassification''',
'''RoCBertLayer''',
'''RoCBertModel''',
'''RoCBertPreTrainedModel''',
'''load_tf_weights_in_roc_bert''',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
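
# Sketch of what the lazy pattern above buys (added, illustrative): importing
# the package is cheap, and heavy torch-backed classes are only materialized on
# first attribute access through the _LazyModule installed in sys.modules.
#   from transformers.models.roc_bert import RoCBertConfig  # no torch import yet
#   from transformers.models.roc_bert import RoCBertModel   # triggers the lazy load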
| 649
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = '''Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be run first on your machine'''
def get_user_input ():
    compute_environment = _ask_options(
        """In which compute environment are you running?""" , ["""This machine""", """AWS (Amazon SageMaker)"""] , _convert_compute_environment , )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config
def config_command_parser ( subparsers=None ):
    if subparsers is not None:
        parser = subparsers.add_parser("""config""" , description=description )
    else:
        parser = argparse.ArgumentParser("""Accelerate config command""" , description=description )
    parser.add_argument(
        """--config_file""" , default=None , help=(
            """The path to use to store the config file. Will default to a file named default_config.yaml in the cache """
            """location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have """
            """such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed """
            """with 'huggingface'."""
        ) , )
    if subparsers is not None:
        parser.set_defaults(func=config_command )
    return parser
def config_command ( args ):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir ):
            os.makedirs(cache_dir )
        config_file = default_yaml_config_file
    if config_file.endswith(""".json""" ):
        config.to_json_file(config_file )
    else:
        config.to_yaml_file(config_file )
    print(F"accelerate configuration saved at {config_file}" )
def main ():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args )
if __name__ == "__main__":
    main()
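
# Illustrative usage (added): `accelerate config --config_file my_config.yaml`
# walks through the prompts and writes the answers to the given file; without
# the flag it writes to the default cache location resolved above.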
| 649
| 1
|
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
def lowerCAmelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
__lowercase , __lowercase : Any = FlaxStableDiffusionPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2""" , revision="""bf16""" , dtype=jnp.bfloataa , )
__lowercase : List[str] = """A painting of a squirrel eating a burger"""
__lowercase : Any = jax.device_count()
__lowercase : List[str] = num_samples * [prompt]
__lowercase : Dict = sd_pipe.prepare_inputs(__a )
__lowercase : List[str] = replicate(__a )
__lowercase : Tuple = shard(__a )
__lowercase : Union[str, Any] = jax.random.PRNGKey(0 )
__lowercase : int = jax.random.split(__a , jax.device_count() )
__lowercase : List[Any] = sd_pipe(__a , __a , __a , num_inference_steps=25 , jit=__a )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
__lowercase : Dict = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
__lowercase : Any = images[0, 253:256, 253:256, -1]
__lowercase : Optional[int] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
__lowercase : Optional[int] = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512] )
print(F"output_slice: {output_slice}" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def lowerCAmelCase ( self : Any ) -> str:
"""simple docstring"""
__lowercase : Optional[int] = """stabilityai/stable-diffusion-2"""
__lowercase , __lowercase : List[str] = FlaxDPMSolverMultistepScheduler.from_pretrained(__a , subfolder="""scheduler""" )
__lowercase , __lowercase : Any = FlaxStableDiffusionPipeline.from_pretrained(
__a , scheduler=__a , revision="""bf16""" , dtype=jnp.bfloat16 , )
__lowercase : Optional[int] = scheduler_params
__lowercase : Optional[int] = """A painting of a squirrel eating a burger"""
__lowercase : Optional[Any] = jax.device_count()
__lowercase : str = num_samples * [prompt]
__lowercase : Any = sd_pipe.prepare_inputs(__a )
__lowercase : int = replicate(__a )
__lowercase : Tuple = shard(__a )
__lowercase : str = jax.random.PRNGKey(0 )
__lowercase : Optional[int] = jax.random.split(__a , jax.device_count() )
__lowercase : str = sd_pipe(__a , __a , __a , num_inference_steps=25 , jit=__a )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
__lowercase : str = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
__lowercase : Optional[Any] = images[0, 253:256, 253:256, -1]
__lowercase : Optional[int] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
__lowercase : List[str] = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297] )
print(F"output_slice: {output_slice}" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
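# --- Added illustration (not part of the original tests) --------------------
# Both tests follow the standard Flax data-parallel inference recipe:
# replicate the params to every device, shard the batch along its leading
# axis, and give each device its own PRNG key. A minimal sketch of that
# pattern (argument names are ours):
def _run_sharded(pipe, params, prompt_ids, steps=25):
    params = replicate(params)  # one copy of the weights per device
    prompt_ids = shard(prompt_ids)  # split the batch dimension across devices
    rng = jax.random.split(jax.random.PRNGKey(0), jax.device_count())
    return pipe(prompt_ids, params, rng, num_inference_steps=steps, jit=True)[0]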
from __future__ import annotations
def snake_case_ ( lowerCAmelCase_ : str , lowerCAmelCase_ : list[str] | None = None ):
__lowercase : Tuple = word_bank or []
# create a table
__lowercase : int = len(lowerCAmelCase_ ) + 1
__lowercase : list[list[list[str]]] = []
for _ in range(lowerCAmelCase_ ):
table.append([] )
# seed value
__lowercase : Dict = [[]] # because empty string has empty combination
# iterate through the indices
for i in range(lowerCAmelCase_ ):
# condition
if table[i] != []:
for word in word_bank:
# slice condition
if target[i : i + len(lowerCAmelCase_ )] == word:
__lowercase : list[list[str]] = [
[word, *way] for way in table[i]
]
# adds the word to every combination the current position holds
# now, push that combination to table[i + len(word)]
table[i + len(lowerCAmelCase_ )] += new_combinations
# combinations are in reverse order so reverse for better output
for combination in table[len(lowerCAmelCase_ )]:
combination.reverse()
return table[len(lowerCAmelCase_ )]
if __name__ == "__main__":
print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
print(
all_construct(
'''hexagonosaurus''',
['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
)
)
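# --- Added illustration (not part of the original file) ---------------------
# A top-down twin of the tabulation above, memoised on the remaining suffix.
# The name all_construct_memo is ours, not taken from the original.
from functools import lru_cache

def all_construct_memo(target: str, word_bank: list[str]) -> list[list[str]]:
    @lru_cache(maxsize=None)
    def helper(suffix: str) -> tuple[tuple[str, ...], ...]:
        if suffix == "":
            return ((),)  # one way to build the empty string: use no words
        ways = []
        for word in word_bank:
            if suffix.startswith(word):
                for rest in helper(suffix[len(word) :]):
                    ways.append((word, *rest))
        return tuple(ways)

    return [list(way) for way in helper(target)]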
def snake_case_ ( lowerCAmelCase_ : int ):
return 1 if digit in (0, 1) else (digit * factorial(digit - 1 ))
def snake_case_ ( lowerCAmelCase_ : int ):
__lowercase : Tuple = 0
__lowercase : Union[str, Any] = number
while duplicate > 0:
__lowercase , __lowercase : Union[str, Any] = divmod(lowerCAmelCase_ , 10 )
fact_sum += factorial(lowerCAmelCase_ )
return fact_sum == number
if __name__ == "__main__":
print('''Program to check whether a number is a Krishnamurthy Number or not.''')
lowerCamelCase : Union[str, Any] = int(input('''Enter number: ''').strip())
print(
f'''{number} is {"" if krishnamurthy(number) else "not "}a Krishnamurthy Number.'''
)
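# --- Added illustration (not part of the original script) -------------------
# Quick sanity check for the predicate above, assuming it is exposed as
# krishnamurthy(): 145 = 1! + 4! + 5! = 1 + 24 + 120, while 140 is not the
# sum of its digit factorials (1 + 24 + 1 = 26).
def _krishnamurthy_demo():
    for n, expected in [(1, True), (2, True), (145, True), (140, False)]:
        assert krishnamurthy(n) is expected, n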
def snake_case_ ( lowerCAmelCase_ : int , lowerCAmelCase_ : int ):
return int((input_a, input_a).count(1 ) != 0 )
def snake_case_ ( ):
assert or_gate(0 , 0 ) == 0
assert or_gate(0 , 1 ) == 1
assert or_gate(1 , 0 ) == 1
assert or_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
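# --- Added illustration (not part of the original file) ---------------------
# The same gate written with Python's native boolean OR; for 0/1 inputs the
# two formulations agree on every row of the truth table.
def or_gate_alt(input_1: int, input_2: int) -> int:
    return int(bool(input_1) or bool(input_2))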
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
lowerCamelCase : Tuple = logging.get_logger(__name__)
lowerCamelCase : Any = {
'''Helsinki-NLP/opus-mt-en-de''': '''https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json''',
# See all Marian models at https://huggingface.co/models?filter=marian
}
class lowerCAmelCase ( __a ):
'''simple docstring'''
_A : Any = '''marian'''
_A : Optional[Any] = ['''past_key_values''']
_A : Dict = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self : List[Any] , __a : Dict=58101 , __a : Optional[int]=None , __a : int=1024 , __a : Dict=12 , __a : Optional[Any]=4096 , __a : Union[str, Any]=16 , __a : str=12 , __a : Optional[int]=4096 , __a : Optional[Any]=16 , __a : Any=0.0 , __a : Union[str, Any]=0.0 , __a : Dict=True , __a : str=True , __a : str="gelu" , __a : Dict=1024 , __a : Dict=0.1 , __a : Tuple=0.0 , __a : str=0.0 , __a : Any=0.02 , __a : Optional[Any]=58100 , __a : Union[str, Any]=False , __a : Union[str, Any]=58100 , __a : Optional[Any]=0 , __a : int=0 , __a : Dict=True , **__a : List[str] , ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Any = vocab_size
__lowercase : str = decoder_vocab_size or vocab_size
__lowercase : Any = max_position_embeddings
__lowercase : Optional[Any] = d_model
__lowercase : int = encoder_ffn_dim
__lowercase : List[str] = encoder_layers
__lowercase : Union[str, Any] = encoder_attention_heads
__lowercase : Tuple = decoder_ffn_dim
__lowercase : Any = decoder_layers
__lowercase : Tuple = decoder_attention_heads
__lowercase : Optional[Any] = dropout
__lowercase : Dict = attention_dropout
__lowercase : Optional[Any] = activation_dropout
__lowercase : Optional[int] = activation_function
__lowercase : Union[str, Any] = init_std
__lowercase : Union[str, Any] = encoder_layerdrop
__lowercase : Dict = decoder_layerdrop
__lowercase : Optional[Any] = use_cache
__lowercase : Union[str, Any] = encoder_layers
__lowercase : List[str] = scale_embedding # scale factor will be sqrt(d_model) if True
__lowercase : List[Any] = share_encoder_decoder_embeddings
super().__init__(
pad_token_id=__a , eos_token_id=__a , is_encoder_decoder=__a , decoder_start_token_id=__a , forced_eos_token_id=__a , **__a , )
class lowerCAmelCase ( __a ):
'''simple docstring'''
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
def lowerCAmelCase ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
__lowercase : Any = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
__lowercase : List[str] = {0: """batch"""}
__lowercase : Optional[int] = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
else:
__lowercase : int = {0: """batch""", 1: """decoder_sequence"""}
__lowercase : List[str] = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(__a , direction="""inputs""" )
elif self.task == "causal-lm":
# TODO: figure this case out.
__lowercase : List[Any] = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
__lowercase , __lowercase : List[str] = self.num_layers
for i in range(__a ):
__lowercase : Optional[Any] = {0: """batch""", 2: """past_sequence + sequence"""}
__lowercase : Tuple = {0: """batch""", 2: """past_sequence + sequence"""}
else:
__lowercase : Optional[int] = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
("""decoder_input_ids""", {0: """batch""", 1: """decoder_sequence"""}),
("""decoder_attention_mask""", {0: """batch""", 1: """decoder_sequence"""}),
] )
return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
def lowerCAmelCase ( self : Dict ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
__lowercase : Dict = super().outputs
else:
__lowercase : List[Any] = super(__a , self ).outputs
if self.use_past:
__lowercase , __lowercase : Any = self.num_layers
for i in range(__a ):
__lowercase : int = {0: """batch""", 2: """past_sequence + sequence"""}
__lowercase : List[Any] = {0: """batch""", 2: """past_sequence + sequence"""}
return common_outputs
def lowerCAmelCase ( self : Any , __a : PreTrainedTokenizer , __a : int = -1 , __a : int = -1 , __a : bool = False , __a : Optional[TensorType] = None , ) -> Mapping[str, Any]:
"""simple docstring"""
__lowercase : Dict = self._generate_dummy_inputs_for_encoder_and_decoder(
__a , __a , __a , __a , __a )
# Generate decoder inputs
__lowercase : Any = seq_length if not self.use_past else 1
__lowercase : Union[str, Any] = self._generate_dummy_inputs_for_encoder_and_decoder(
__a , __a , __a , __a , __a )
__lowercase : Optional[Any] = {F"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
__lowercase : List[str] = dict(**__a , **__a )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
__lowercase , __lowercase : Optional[int] = common_inputs["""input_ids"""].shape
__lowercase : List[Any] = common_inputs["""decoder_input_ids"""].shape[1]
__lowercase , __lowercase : List[str] = self.num_attention_heads
__lowercase : int = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__lowercase : Any = decoder_seq_length + 3
__lowercase : Union[str, Any] = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
__lowercase : Optional[int] = torch.cat(
[common_inputs["""decoder_attention_mask"""], torch.ones(__a , __a )] , dim=1 )
__lowercase : Union[str, Any] = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
__lowercase , __lowercase : Any = self.num_layers
__lowercase : str = min(__a , __a )
__lowercase : int = max(__a , __a ) - min_num_layers
__lowercase : List[str] = """encoder""" if num_encoder_layers > num_decoder_layers else """decoder"""
for _ in range(__a ):
common_inputs["past_key_values"].append(
(
torch.zeros(__a ),
torch.zeros(__a ),
torch.zeros(__a ),
torch.zeros(__a ),
) )
# TODO: test this.
__lowercase : Union[str, Any] = encoder_shape if remaining_side_name == """encoder""" else decoder_shape
for _ in range(__a , __a ):
common_inputs["past_key_values"].append((torch.zeros(__a ), torch.zeros(__a )) )
return common_inputs
def lowerCAmelCase ( self : Optional[int] , __a : PreTrainedTokenizer , __a : int = -1 , __a : int = -1 , __a : bool = False , __a : Optional[TensorType] = None , ) -> Mapping[str, Any]:
"""simple docstring"""
__lowercase : Tuple = self._generate_dummy_inputs_for_encoder_and_decoder(
__a , __a , __a , __a , __a )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
__lowercase , __lowercase : Any = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
__lowercase : Optional[Any] = seqlen + 2
__lowercase , __lowercase : Dict = self.num_layers
__lowercase , __lowercase : Optional[int] = self.num_attention_heads
__lowercase : List[str] = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__lowercase : str = common_inputs["""attention_mask"""].dtype
__lowercase : Union[str, Any] = torch.cat(
[common_inputs["""attention_mask"""], torch.ones(__a , __a , dtype=__a )] , dim=1 )
__lowercase : Union[str, Any] = [
(torch.zeros(__a ), torch.zeros(__a )) for _ in range(__a )
]
return common_inputs
def lowerCAmelCase ( self : List[str] , __a : PreTrainedTokenizer , __a : int = -1 , __a : int = -1 , __a : bool = False , __a : Optional[TensorType] = None , ) -> Mapping[str, Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = compute_effective_axis_dimension(
__a , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__lowercase : str = tokenizer.num_special_tokens_to_add(__a )
__lowercase : Any = compute_effective_axis_dimension(
__a , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__a )
# Generate dummy inputs according to compute batch and sequence
__lowercase : int = [""" """.join([tokenizer.unk_token] ) * seq_length] * batch_size
__lowercase : Union[str, Any] = dict(tokenizer(__a , return_tensors=__a ) )
return common_inputs
def lowerCAmelCase ( self : int , __a : PreTrainedTokenizer , __a : int = -1 , __a : int = -1 , __a : bool = False , __a : Optional[TensorType] = None , ) -> Mapping[str, Any]:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
__lowercase : str = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
__a , batch_size=__a , seq_length=__a , is_pair=__a , framework=__a )
else:
__lowercase : Optional[Any] = self._generate_dummy_inputs_for_causal_lm(
__a , batch_size=__a , seq_length=__a , is_pair=__a , framework=__a )
return common_inputs
def lowerCAmelCase ( self : Union[str, Any] , __a : Dict , __a : Any , __a : Optional[Any] , __a : str ) -> int:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
__lowercase : Tuple = super()._flatten_past_key_values_(__a , __a , __a , __a )
else:
__lowercase : List[str] = super(__a , self )._flatten_past_key_values_(
__a , __a , __a , __a )
@property
def lowerCAmelCase ( self : str ) -> float:
"""simple docstring"""
return 1E-4
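# --- Added illustration (not part of the original file) ---------------------
# Sketch of how an ONNX config like the one above is consumed during export.
# It assumes the second class above is upstream's MarianOnnxConfig; the
# checkpoint and output path are placeholders, and nothing runs on import.
def _export_demo():
    from pathlib import Path

    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
    from transformers.onnx import export

    checkpoint = "Helsinki-NLP/opus-mt-en-de"
    tokenizer = AutoTokenizer.from_pretrained(checkpoint)
    model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint)
    onnx_config = MarianOnnxConfig(model.config, task="seq2seq-lm")  # name assumed
    export(tokenizer, model, onnx_config, onnx_config.default_onnx_opset, Path("marian.onnx"))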
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase : int = {
'''configuration_funnel''': ['''FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FunnelConfig'''],
'''convert_funnel_original_tf_checkpoint_to_pytorch''': [],
'''tokenization_funnel''': ['''FunnelTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : List[str] = ['''FunnelTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Union[str, Any] = [
'''FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FunnelBaseModel''',
'''FunnelForMaskedLM''',
'''FunnelForMultipleChoice''',
'''FunnelForPreTraining''',
'''FunnelForQuestionAnswering''',
'''FunnelForSequenceClassification''',
'''FunnelForTokenClassification''',
'''FunnelModel''',
'''FunnelPreTrainedModel''',
'''load_tf_weights_in_funnel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : str = [
'''TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFFunnelBaseModel''',
'''TFFunnelForMaskedLM''',
'''TFFunnelForMultipleChoice''',
'''TFFunnelForPreTraining''',
'''TFFunnelForQuestionAnswering''',
'''TFFunnelForSequenceClassification''',
'''TFFunnelForTokenClassification''',
'''TFFunnelModel''',
'''TFFunnelPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
lowerCamelCase : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
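# --- Added illustration (not part of the original file) ---------------------
# The _LazyModule above defers the heavy torch/tf imports until an exported
# name is first accessed. A minimal sketch of the idea (not the real class):
import importlib as _importlib
import types as _types

class _LazyModuleSketch(_types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported name to the submodule that defines it
        self._owner = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        module = _importlib.import_module("." + self._owner[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups bypass __getattr__
        return value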
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
lowerCamelCase : Optional[int] = logging.get_logger(__name__)
lowerCamelCase : Any = {name: getattr(transformers, name + '''Fast''') for name in SLOW_TO_FAST_CONVERTERS}
def snake_case_ ( lowerCAmelCase_ : Dict , lowerCAmelCase_ : str , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Any ):
if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
raise ValueError(F"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}." )
if tokenizer_name is None:
__lowercase : Tuple = TOKENIZER_CLASSES
else:
__lowercase : Union[str, Any] = {tokenizer_name: getattr(lowerCAmelCase_ , tokenizer_name + """Fast""" )}
logger.info(F"Loading tokenizer classes: {tokenizer_names}" )
for tokenizer_name in tokenizer_names:
__lowercase : int = TOKENIZER_CLASSES[tokenizer_name]
__lowercase : Any = True
if checkpoint_name is None:
__lowercase : str = list(tokenizer_class.max_model_input_sizes.keys() )
else:
__lowercase : Optional[int] = [checkpoint_name]
logger.info(F"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}" )
for checkpoint in checkpoint_names:
logger.info(F"Loading {tokenizer_class.__class__.__name__} {checkpoint}" )
# Load tokenizer
__lowercase : List[str] = tokenizer_class.from_pretrained(lowerCAmelCase_ , force_download=lowerCAmelCase_ )
# Save fast tokenizer
logger.info(F"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}" )
# For organization names we create sub-directories
if "/" in checkpoint:
__lowercase , __lowercase : Any = checkpoint.split("""/""" )
__lowercase : Optional[int] = os.path.join(lowerCAmelCase_ , lowerCAmelCase_ )
elif add_prefix:
__lowercase : Any = checkpoint
__lowercase : Optional[int] = dump_path
else:
__lowercase : List[Any] = None
__lowercase : Dict = dump_path
logger.info(F"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}" )
if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
__lowercase : Tuple = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
__lowercase : str = file_path.split(lowerCAmelCase_ )[-1][0]
if next_char == "/":
__lowercase : List[str] = os.path.join(lowerCAmelCase_ , lowerCAmelCase_ )
__lowercase : Dict = None
logger.info(F"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}" )
__lowercase : str = tokenizer.save_pretrained(
lowerCAmelCase_ , legacy_format=lowerCAmelCase_ , filename_prefix=lowerCAmelCase_ )
logger.info(F"=> File names {file_names}" )
for file_name in file_names:
if not file_name.endswith("""tokenizer.json""" ):
os.remove(lowerCAmelCase_ )
logger.info(F"=> removing {file_name}" )
if __name__ == "__main__":
lowerCamelCase : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output generated fast tokenizer files.'''
)
parser.add_argument(
'''--tokenizer_name''',
default=None,
type=str,
help=(
f'''Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will '''
'''download and convert all the checkpoints from AWS.'''
),
)
parser.add_argument(
'''--checkpoint_name''',
default=None,
type=str,
help='''Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.''',
)
parser.add_argument(
'''--force_download''',
action='''store_true''',
help='''Re-download checkpoints.''',
)
lowerCamelCase : List[str] = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
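# --- Added illustration (not part of the original script) -------------------
# Programmatic equivalent of the CLI invocation above; the tokenizer and
# checkpoint names are placeholders, so the call is left commented out.
# convert_slow_checkpoint_to_fast("Bert", "bert-base-uncased", "./fast_tokenizers", False)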
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
lowerCamelCase : Any = None
try:
import msvcrt
except ImportError:
lowerCamelCase : str = None
try:
import fcntl
except ImportError:
lowerCamelCase : Optional[Any] = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
lowerCamelCase : Union[str, Any] = OSError
# Data
# ------------------------------------------------
lowerCamelCase : Tuple = [
'''Timeout''',
'''BaseFileLock''',
'''WindowsFileLock''',
'''UnixFileLock''',
'''SoftFileLock''',
'''FileLock''',
]
lowerCamelCase : Tuple = '''3.0.12'''
lowerCamelCase : Any = None
def snake_case_ ( ):
global _logger
__lowercase : List[str] = _logger or logging.getLogger(__name__ )
return _logger
class lowerCAmelCase ( __a ):
'''simple docstring'''
def __init__( self : Any , __a : Any ) -> List[Any]:
"""simple docstring"""
__lowercase : List[str] = lock_file
return None
def __str__( self : str ) -> Any:
"""simple docstring"""
__lowercase : Any = F"The file lock '{self.lock_file}' could not be acquired."
return temp
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : List[Any] , __a : Optional[int] ) -> int:
"""simple docstring"""
__lowercase : Optional[Any] = lock
return None
def __enter__( self : Dict ) -> Dict:
"""simple docstring"""
return self.lock
def __exit__( self : Optional[int] , __a : Dict , __a : Any , __a : Tuple ) -> Optional[Any]:
"""simple docstring"""
self.lock.release()
return None
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : Tuple , __a : Any , __a : Dict=-1 , __a : Optional[Any]=None ) -> Any:
"""simple docstring"""
__lowercase : Optional[int] = max_filename_length if max_filename_length is not None else 255
# Hash the filename if it's too long
__lowercase : Dict = self.hash_filename_if_too_long(__a , __a )
# The path to the lock file.
__lowercase : Optional[Any] = lock_file
# The file descriptor for the *_lock_file* as it is returned by the
# os.open() function.
# This file lock is only NOT None, if the object currently holds the
# lock.
__lowercase : int = None
# The default timeout value.
__lowercase : Optional[int] = timeout
# We use this lock primarily for the lock counter.
__lowercase : Optional[Any] = threading.Lock()
# The lock counter is used for implementing the nested locking
# mechanism. Whenever the lock is acquired, the counter is increased and
# the lock is only released, when this value is 0 again.
__lowercase : Union[str, Any] = 0
return None
@property
def lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
return self._lock_file
@property
def lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
return self._timeout
@timeout.setter
def lowerCAmelCase ( self : Tuple , __a : Tuple ) -> Dict:
"""simple docstring"""
__lowercase : Tuple = float(__a )
return None
def lowerCAmelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
raise NotImplementedError()
def lowerCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
raise NotImplementedError()
@property
def lowerCAmelCase ( self : Dict ) -> List[Any]:
"""simple docstring"""
return self._lock_file_fd is not None
def lowerCAmelCase ( self : Any , __a : Optional[Any]=None , __a : Union[str, Any]=0.05 ) -> List[str]:
"""simple docstring"""
if timeout is None:
__lowercase : Union[str, Any] = self.timeout
# Increment the number right at the beginning.
# We can still undo it, if something fails.
with self._thread_lock:
self._lock_counter += 1
__lowercase : int = id(self )
__lowercase : Optional[Any] = self._lock_file
__lowercase : List[str] = time.time()
try:
while True:
with self._thread_lock:
if not self.is_locked:
logger().debug(F"Attempting to acquire lock {lock_id} on {lock_filename}" )
self._acquire()
if self.is_locked:
logger().debug(F"Lock {lock_id} acquired on {lock_filename}" )
break
elif timeout >= 0 and time.time() - start_time > timeout:
logger().debug(F"Timeout on acquiring lock {lock_id} on {lock_filename}" )
raise Timeout(self._lock_file )
else:
logger().debug(
F"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..." )
time.sleep(__a )
except: # noqa
# Something did go wrong, so decrement the counter.
with self._thread_lock:
__lowercase : Optional[int] = max(0 , self._lock_counter - 1 )
raise
return _Acquire_ReturnProxy(lock=self )
def lowerCAmelCase ( self : Union[str, Any] , __a : Optional[Any]=False ) -> Optional[Any]:
"""simple docstring"""
with self._thread_lock:
if self.is_locked:
self._lock_counter -= 1
if self._lock_counter == 0 or force:
__lowercase : Optional[Any] = id(self )
__lowercase : str = self._lock_file
logger().debug(F"Attempting to release lock {lock_id} on {lock_filename}" )
self._release()
__lowercase : List[str] = 0
logger().debug(F"Lock {lock_id} released on {lock_filename}" )
return None
def __enter__( self : Any ) -> Optional[Any]:
"""simple docstring"""
self.acquire()
return self
def __exit__( self : List[str] , __a : str , __a : int , __a : List[Any] ) -> Tuple:
"""simple docstring"""
self.release()
return None
def __del__( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
self.release(force=__a )
return None
def lowerCAmelCase ( self : Tuple , __a : str , __a : int ) -> str:
"""simple docstring"""
__lowercase : List[Any] = os.path.basename(__a )
if len(__a ) > max_length and max_length > 0:
__lowercase : int = os.path.dirname(__a )
__lowercase : List[str] = str(hash(__a ) )
__lowercase : Optional[Any] = filename[: max_length - len(__a ) - 8] + """...""" + hashed_filename + """.lock"""
return os.path.join(__a , __a )
else:
return path
class lowerCAmelCase ( __a ):
'''simple docstring'''
def __init__( self : Union[str, Any] , __a : List[Any] , __a : Optional[int]=-1 , __a : Tuple=None ) -> List[Any]:
"""simple docstring"""
from .file_utils import relative_to_absolute_path
super().__init__(__a , timeout=__a , max_filename_length=__a )
__lowercase : Tuple = """\\\\?\\""" + relative_to_absolute_path(self.lock_file )
def lowerCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : List[str] = os.O_RDWR | os.O_CREAT | os.O_TRUNC
try:
__lowercase : Tuple = os.open(self._lock_file , __a )
except OSError:
pass
else:
try:
msvcrt.locking(__a , msvcrt.LK_NBLCK , 1 )
except OSError:
os.close(__a )
else:
__lowercase : Union[str, Any] = fd
return None
def lowerCAmelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
__lowercase : Optional[Any] = self._lock_file_fd
__lowercase : int = None
msvcrt.locking(__a , msvcrt.LK_UNLCK , 1 )
os.close(__a )
try:
os.remove(self._lock_file )
# Probably another instance of the application
# that acquired the file lock.
except OSError:
pass
return None
class lowerCAmelCase ( __a ):
'''simple docstring'''
def __init__( self : List[str] , __a : Optional[Any] , __a : str=-1 , __a : List[str]=None ) -> Any:
"""simple docstring"""
__lowercase : Dict = os.statvfs(os.path.dirname(__a ) ).f_namemax
super().__init__(__a , timeout=__a , max_filename_length=__a )
def lowerCAmelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
__lowercase : List[Any] = os.O_RDWR | os.O_CREAT | os.O_TRUNC
__lowercase : List[str] = os.open(self._lock_file , __a )
try:
fcntl.flock(__a , fcntl.LOCK_EX | fcntl.LOCK_NB )
except OSError:
os.close(__a )
else:
__lowercase : str = fd
return None
def lowerCAmelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase : Any = self._lock_file_fd
__lowercase : List[str] = None
fcntl.flock(__a , fcntl.LOCK_UN )
os.close(__a )
return None
class lowerCAmelCase ( __a ):
'''simple docstring'''
def lowerCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Tuple = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
try:
__lowercase : Union[str, Any] = os.open(self._lock_file , __a )
except OSError:
pass
else:
__lowercase : Optional[int] = fd
return None
def lowerCAmelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
os.close(self._lock_file_fd )
__lowercase : int = None
try:
os.remove(self._lock_file )
# The file is already deleted and that's what we want.
except OSError:
pass
return None
lowerCamelCase : Optional[Any] = None
if msvcrt:
lowerCamelCase : List[Any] = WindowsFileLock
elif fcntl:
lowerCamelCase : List[Any] = UnixFileLock
else:
lowerCamelCase : Union[str, Any] = SoftFileLock
if warnings is not None:
warnings.warn('''only soft file lock is available''')
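# --- Added illustration (not part of the original module) -------------------
# Typical usage of the FileLock alias chosen just above, assuming upstream's
# signature FileLock(lock_file, timeout=-1); the file names are placeholders.
def _filelock_demo():
    lock = FileLock("high_ground.txt.lock", timeout=5)
    with lock:  # blocks (up to 5 s) until no other process holds the lock
        with open("high_ground.txt", "a") as f:
            f.write("one writer at a time\n")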
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
lowerCamelCase : Optional[int] = '''Usage of script: script_name <size_of_canvas:int>'''
lowerCamelCase : List[str] = [0] * 1_00 + [1] * 10
random.shuffle(choice)
def snake_case_ ( lowerCAmelCase_ : int ):
__lowercase : Tuple = [[False for i in range(lowerCAmelCase_ )] for j in range(lowerCAmelCase_ )]
return canvas
def snake_case_ ( lowerCAmelCase_ : list[list[bool]] ):
for i, row in enumerate(lowerCAmelCase_ ):
for j, _ in enumerate(lowerCAmelCase_ ):
__lowercase : Union[str, Any] = bool(random.getrandbits(1 ) )
def snake_case_ ( lowerCAmelCase_ : list[list[bool]] ):
__lowercase : Union[str, Any] = np.array(lowerCAmelCase_ )
__lowercase : List[Any] = np.array(create_canvas(current_canvas.shape[0] ) )
for r, row in enumerate(lowerCAmelCase_ ):
for c, pt in enumerate(lowerCAmelCase_ ):
__lowercase : Optional[Any] = __judge_point(
lowerCAmelCase_ , current_canvas[r - 1 : r + 2, c - 1 : c + 2] )
__lowercase : Optional[Any] = next_gen_canvas
del next_gen_canvas # cleaning memory as we move on.
__lowercase : list[list[bool]] = current_canvas.tolist()
return return_canvas
def snake_case_ ( lowerCAmelCase_ : bool , lowerCAmelCase_ : list[list[bool]] ):
__lowercase : Tuple = 0
__lowercase : Tuple = 0
# finding dead or alive neighbours count.
for i in neighbours:
for status in i:
if status:
alive += 1
else:
dead += 1
# handling duplicate entry for focus pt.
if pt:
alive -= 1
else:
dead -= 1
# running the rules of game here.
__lowercase : int = pt
if pt:
if alive < 2:
__lowercase : List[Any] = False
elif alive == 2 or alive == 3:
__lowercase : Dict = True
elif alive > 3:
__lowercase : List[str] = False
else:
if alive == 3:
__lowercase : Any = True
return state
if __name__ == "__main__":
if len(sys.argv) != 2:
raise Exception(usage_doc)
lowerCamelCase : str = int(sys.argv[1])
# main working structure of this module.
lowerCamelCase : Tuple = create_canvas(canvas_size)
seed(c)
lowerCamelCase ,lowerCamelCase : str = plt.subplots()
fig.show()
lowerCamelCase : List[Any] = ListedColormap(['''w''', '''k'''])
try:
while True:
lowerCamelCase : List[str] = run(c)
ax.matshow(c, cmap=cmap)
fig.canvas.draw()
ax.cla()
except KeyboardInterrupt:
# do nothing.
pass
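# --- Added illustration (not part of the original script) -------------------
# A vectorised alternative to the per-cell loop above: count live neighbours
# with a 2-D convolution. scipy is an extra dependency the original avoids.
def _run_vectorized(canvas: np.ndarray) -> np.ndarray:
    from scipy.signal import convolve2d

    kernel = np.array([[1, 1, 1], [1, 0, 1], [1, 1, 1]])
    neighbours = convolve2d(canvas.astype(int), kernel, mode="same")
    # Conway's rules: a live cell survives with 2-3 live neighbours,
    # a dead cell comes alive with exactly 3.
    return (neighbours == 3) | (canvas.astype(bool) & (neighbours == 2))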
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
lowerCamelCase : Optional[int] = logging.get_logger(__name__)
lowerCamelCase : Tuple = {
'''microsoft/layoutlmv3-base''': '''https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json''',
}
class lowerCAmelCase ( __a ):
'''simple docstring'''
_A : int = '''layoutlmv3'''
def __init__( self : Dict , __a : List[str]=50265 , __a : str=768 , __a : List[Any]=12 , __a : List[Any]=12 , __a : List[str]=3072 , __a : Optional[Any]="gelu" , __a : Optional[int]=0.1 , __a : List[Any]=0.1 , __a : Tuple=512 , __a : int=2 , __a : Any=0.02 , __a : Union[str, Any]=1E-5 , __a : List[str]=1 , __a : List[Any]=0 , __a : int=2 , __a : str=1024 , __a : str=128 , __a : List[Any]=128 , __a : Tuple=True , __a : Optional[int]=32 , __a : Any=128 , __a : List[Any]=64 , __a : Tuple=256 , __a : str=True , __a : int=True , __a : Optional[Any]=True , __a : Any=224 , __a : str=3 , __a : List[str]=16 , __a : Union[str, Any]=None , **__a : List[Any] , ) -> List[str]:
"""simple docstring"""
super().__init__(
vocab_size=__a , hidden_size=__a , num_hidden_layers=__a , num_attention_heads=__a , intermediate_size=__a , hidden_act=__a , hidden_dropout_prob=__a , attention_probs_dropout_prob=__a , max_position_embeddings=__a , type_vocab_size=__a , initializer_range=__a , layer_norm_eps=__a , pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a , )
__lowercase : int = max_ad_position_embeddings
__lowercase : Any = coordinate_size
__lowercase : Optional[Any] = shape_size
__lowercase : str = has_relative_attention_bias
__lowercase : int = rel_pos_bins
__lowercase : Union[str, Any] = max_rel_pos
__lowercase : str = has_spatial_attention_bias
__lowercase : str = rel_ad_pos_bins
__lowercase : List[Any] = max_rel_ad_pos
__lowercase : Tuple = text_embed
__lowercase : int = visual_embed
__lowercase : Tuple = input_size
__lowercase : Dict = num_channels
__lowercase : str = patch_size
__lowercase : Optional[int] = classifier_dropout
class lowerCAmelCase ( __a ):
'''simple docstring'''
_A : str = version.parse('''1.12''' )
@property
def lowerCAmelCase ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
else:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels"""}),
] )
@property
def lowerCAmelCase ( self : Union[str, Any] ) -> float:
"""simple docstring"""
return 1E-5
@property
def lowerCAmelCase ( self : str ) -> int:
"""simple docstring"""
return 12
def lowerCAmelCase ( self : List[Any] , __a : "ProcessorMixin" , __a : int = -1 , __a : int = -1 , __a : bool = False , __a : Optional["TensorType"] = None , __a : int = 3 , __a : int = 40 , __a : int = 40 , ) -> Mapping[str, Any]:
"""simple docstring"""
setattr(processor.image_processor , """apply_ocr""" , __a )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__lowercase : Tuple = compute_effective_axis_dimension(
__a , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__lowercase : Tuple = processor.tokenizer.num_special_tokens_to_add(__a )
__lowercase : Tuple = compute_effective_axis_dimension(
__a , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__a )
# Generate dummy inputs according to compute batch and sequence
__lowercase : Union[str, Any] = [[""" """.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
__lowercase : Tuple = [[[48, 84, 73, 128]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
__lowercase : Tuple = self._generate_dummy_images(__a , __a , __a , __a )
__lowercase : int = dict(
processor(
__a , text=__a , boxes=__a , return_tensors=__a , ) )
return inputs
def snake_case_ ( lowerCAmelCase_ : list[int] ):
if not numbers:
return 0
if not isinstance(lowerCAmelCase_ , (list, tuple) ) or not all(
isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) for number in numbers ):
raise ValueError("""numbers must be an iterable of integers""" )
__lowercase : List[str] = numbers[0]
for i in range(1 , len(lowerCAmelCase_ ) ):
# update the maximum and minimum subarray products
__lowercase : int = numbers[i]
if number < 0:
__lowercase , __lowercase : Dict = min_till_now, max_till_now
__lowercase : Tuple = max(lowerCAmelCase_ , max_till_now * number )
__lowercase : List[str] = min(lowerCAmelCase_ , min_till_now * number )
# update the maximum product found till now
__lowercase : int = max(lowerCAmelCase_ , lowerCAmelCase_ )
return max_prod
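# --- Added illustration (not part of the original file) ---------------------
# Worked example, assuming the function above is exposed as
# max_product_subarray: in [2, 3, -2, 4] the best subarray is [2, 3] -> 6,
# and in [-2, 0, -1] it is [0] -> 0.
# print(max_product_subarray([2, 3, -2, 4]))  # 6
# print(max_product_subarray([-2, 0, -1]))    # 0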
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
lowerCamelCase : List[Any] = logging.get_logger(__name__)
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : List[str] , __a : str = None , __a : uuid.UUID = None , __a : Any=None , __a : List[Any]=None ) -> List[Any]:
"""simple docstring"""
if not conversation_id:
__lowercase : Any = uuid.uuid4()
if past_user_inputs is None:
__lowercase : Dict = []
if generated_responses is None:
__lowercase : Dict = []
__lowercase : uuid.UUID = conversation_id
__lowercase : List[str] = past_user_inputs
__lowercase : List[str] = generated_responses
__lowercase : Optional[str] = text
def __eq__( self : Dict , __a : Dict ) -> Any:
"""simple docstring"""
if not isinstance(__a , __a ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def lowerCAmelCase ( self : List[str] , __a : str , __a : bool = False ) -> Dict:
"""simple docstring"""
if self.new_user_input:
if overwrite:
logger.warning(
F"User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten "
F"with: \"{text}\"." )
__lowercase : Optional[int] = text
else:
logger.warning(
F"User input added while unprocessed input was existing: \"{self.new_user_input}\" new input "
F"ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input" )
else:
__lowercase : Dict = text
def lowerCAmelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
__lowercase : Dict = None
def lowerCAmelCase ( self : Optional[int] , __a : str ) -> List[Any]:
"""simple docstring"""
self.generated_responses.append(__a )
def lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self : int ) -> str:
"""simple docstring"""
__lowercase : Optional[int] = F"Conversation id: {self.uuid} \n"
for is_user, text in self.iter_texts():
__lowercase : Optional[Any] = """user""" if is_user else """bot"""
output += F"{name} >> {text} \n"
return output
@add_end_docstrings(
__a , r'''
min_length_for_response (`int`, *optional*, defaults to 32):
The minimum length (in number of tokens) for a response.
minimum_tokens (`int`, *optional*, defaults to 10):
The minimum length of tokens to leave for a response.
''' , )
class lowerCAmelCase ( __a ):
'''simple docstring'''
def __init__( self : Any , *__a : int , **__a : str ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(*__a , **__a )
if self.tokenizer.pad_token_id is None:
__lowercase : List[Any] = self.tokenizer.eos_token
def lowerCAmelCase ( self : Union[str, Any] , __a : int=None , __a : Tuple=None , __a : Any=None , **__a : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowercase : List[str] = {}
__lowercase : Tuple = {}
__lowercase : List[str] = {}
if min_length_for_response is not None:
__lowercase : Dict = min_length_for_response
if minimum_tokens is not None:
__lowercase : Union[str, Any] = minimum_tokens
if "max_length" in generate_kwargs:
__lowercase : Union[str, Any] = generate_kwargs["""max_length"""]
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
__lowercase : Union[str, Any] = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(__a )
return preprocess_params, forward_params, postprocess_params
def __call__( self : Optional[int] , __a : Union[Conversation, List[Conversation]] , __a : Dict=0 , **__a : List[Any] ) -> Tuple:
"""simple docstring"""
__lowercase : List[Any] = super().__call__(__a , num_workers=__a , **__a )
if isinstance(__a , __a ) and len(__a ) == 1:
return outputs[0]
return outputs
def lowerCAmelCase ( self : Union[str, Any] , __a : Conversation , __a : Tuple=32 ) -> Dict[str, Any]:
"""simple docstring"""
if not isinstance(__a , __a ):
raise ValueError("""ConversationalPipeline, expects Conversation as inputs""" )
if conversation.new_user_input is None:
raise ValueError(
F"Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. "
"""Add user inputs with the conversation's `add_user_input` method""" )
if hasattr(self.tokenizer , """_build_conversation_input_ids""" ):
__lowercase : List[Any] = self.tokenizer._build_conversation_input_ids(__a )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
__lowercase : Tuple = self._legacy_parse_and_tokenize(__a )
if self.framework == "pt":
__lowercase : List[Any] = torch.LongTensor([input_ids] )
elif self.framework == "tf":
__lowercase : List[str] = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def lowerCAmelCase ( self : Any , __a : Dict , __a : Any=10 , **__a : Dict ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Optional[int] = generate_kwargs.get("""max_length""" , self.model.config.max_length )
__lowercase : List[Any] = model_inputs["""input_ids"""].shape[1]
if max_length - minimum_tokens < n:
logger.warning(F"Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})" )
__lowercase : Any = max_length - minimum_tokens
__lowercase : int = model_inputs["""input_ids"""][:, -trim:]
if "attention_mask" in model_inputs:
__lowercase : Dict = model_inputs["""attention_mask"""][:, -trim:]
__lowercase : Union[str, Any] = model_inputs.pop("""conversation""" )
__lowercase : Tuple = max_length
__lowercase : int = self.model.generate(**__a , **__a )
if self.model.config.is_encoder_decoder:
__lowercase : Optional[int] = 1
else:
__lowercase : str = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def lowerCAmelCase ( self : int , __a : Tuple , __a : List[Any]=True ) -> List[str]:
"""simple docstring"""
__lowercase : int = model_outputs["""output_ids"""]
__lowercase : Union[str, Any] = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=__a , clean_up_tokenization_spaces=__a , )
__lowercase : List[str] = model_outputs["""conversation"""]
conversation.mark_processed()
conversation.append_response(__a )
return conversation
def lowerCAmelCase ( self : int , __a : Conversation ) -> Dict:
"""simple docstring"""
__lowercase : Optional[int] = self.tokenizer.eos_token_id
__lowercase : Optional[Any] = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(__a , add_special_tokens=__a ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(__a , add_special_tokens=__a ) )
if len(__a ) > self.tokenizer.model_max_length:
__lowercase : List[Any] = input_ids[-self.tokenizer.model_max_length :]
return input_ids
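# --- Added illustration (not part of the original module) -------------------
# Typical end-user flow for the pipeline above; the checkpoint name is a
# placeholder and nothing runs on import.
def _chat_demo():
    from transformers import Conversation, pipeline

    chatbot = pipeline("conversational", model="microsoft/DialoGPT-medium")
    conversation = Conversation("What's the best way to learn Python?")
    conversation = chatbot(conversation)
    print(conversation.generated_responses[-1])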
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def snake_case_ ( lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : List[str] ):
__lowercase : List[str] = {
"""en""": """Machine learning is great, isn't it?""",
"""ru""": """Машинное обучение - это здорово, не так ли?""",
"""de""": """Maschinelles Lernen ist großartig, nicht wahr?""",
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
__lowercase : int = {
"""wmt16-en-de-dist-12-1""": [28.3, 27.52],
"""wmt16-en-de-dist-6-1""": [27.4, 27.11],
"""wmt16-en-de-12-1""": [26.9, 25.75],
}
__lowercase : Optional[Any] = F"{src_lang}-{tgt_lang}"
__lowercase : int = F"\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"allenai/{model_name}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n"
model_card_dir.mkdir(parents=lowerCAmelCase_ , exist_ok=lowerCAmelCase_ )
__lowercase : List[Any] = os.path.join(lowerCAmelCase_ , """README.md""" )
print(F"Generating {path}" )
with open(lowerCAmelCase_ , """w""" , encoding="""utf-8""" ) as f:
f.write(lowerCAmelCase_ )
# make sure we are under the root of the project
lowerCamelCase : int = Path(__file__).resolve().parent.parent.parent
lowerCamelCase : Dict = repo_dir / '''model_cards'''
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
lowerCamelCase : List[Any] = model_cards_dir / '''allenai''' / model_name
write_model_card(model_card_dir, src_lang='''en''', tgt_lang='''de''', model_name=model_name)
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class lowerCAmelCase ( __a ):
'''simple docstring'''
def lowerCAmelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowercase : str = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__a , """tf_padding""" ) )
self.parent.assertTrue(hasattr(__a , """depth_multiplier""" ) )
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : Union[str, Any] , __a : Tuple , __a : str=13 , __a : Dict=3 , __a : List[Any]=32 , __a : Any=0.25 , __a : Any=8 , __a : Optional[int]=8 , __a : Optional[int]=6 , __a : Dict=32 , __a : Tuple=True , __a : List[Any]=True , __a : Optional[int]=True , __a : Tuple="relu6" , __a : Optional[Any]=1280 , __a : str=0.1 , __a : str=0.02 , __a : Optional[Any]=True , __a : Tuple=True , __a : Dict=10 , __a : Optional[Any]=None , ) -> Any:
"""simple docstring"""
__lowercase : List[str] = parent
__lowercase : Tuple = batch_size
__lowercase : Dict = num_channels
__lowercase : Optional[int] = image_size
__lowercase : int = depth_multiplier
__lowercase : str = depth_divisible_by
__lowercase : int = min_depth
__lowercase : Tuple = expand_ratio
__lowercase : Optional[int] = tf_padding
__lowercase : Dict = output_stride
__lowercase : Dict = first_layer_is_expansion
__lowercase : Optional[Any] = finegrained_output
__lowercase : str = hidden_act
__lowercase : Union[str, Any] = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier )
__lowercase : Optional[int] = classifier_dropout_prob
__lowercase : int = use_labels
__lowercase : Optional[int] = is_training
__lowercase : Dict = num_labels
__lowercase : Tuple = initializer_range
__lowercase : Optional[Any] = scope
def lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase : List[Any] = None
__lowercase : Optional[Any] = None
if self.use_labels:
__lowercase : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
__lowercase : Optional[int] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__lowercase : List[Any] = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowerCAmelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def lowerCAmelCase ( self : Tuple , __a : Dict , __a : Tuple , __a : Optional[int] , __a : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : Optional[int] = MobileNetVaModel(config=__a )
model.to(__a )
model.eval()
__lowercase : Tuple = model(__a )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
self.parent.assertEqual(
result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , )
def lowerCAmelCase ( self : List[str] , __a : Optional[int] , __a : List[str] , __a : str , __a : Optional[int] ) -> Tuple:
"""simple docstring"""
__lowercase : List[Any] = self.num_labels
__lowercase : Dict = MobileNetVaForImageClassification(__a )
model.to(__a )
model.eval()
__lowercase : Dict = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase ( self : int , __a : List[str] , __a : Tuple , __a : Any , __a : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowercase : int = self.num_labels
__lowercase : List[Any] = MobileNetVaForSemanticSegmentation(__a )
model.to(__a )
model.eval()
__lowercase : Dict = model(__a )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
__lowercase : str = model(__a , labels=__a )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowerCAmelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
__lowercase : List[str] = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase , __lowercase : List[str] = config_and_inputs
__lowercase : List[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase ( __a , __a , unittest.TestCase ):
'''simple docstring'''
_A : Tuple = (
(MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
if is_torch_available()
else ()
)
_A : Optional[Any] = (
{
'''feature-extraction''': MobileNetVaModel,
'''image-classification''': MobileNetVaForImageClassification,
'''image-segmentation''': MobileNetVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
_A : Tuple = False
_A : List[str] = False
_A : List[str] = False
_A : Optional[int] = False
def lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = MobileNetVaModelTester(self )
__lowercase : int = MobileNetVaConfigTester(self , config_class=__a , has_text_modality=__a )
def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileNetV2 does not use inputs_embeds""" )
def lowerCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip(reason="""MobileNetV2 does not support input and output embeddings""" )
def lowerCAmelCase ( self : Any ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip(reason="""MobileNetV2 does not output attentions""" )
def lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
pass
def lowerCAmelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
__lowercase , __lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase : List[Any] = model_class(__a )
__lowercase : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase : int = [*signature.parameters.keys()]
__lowercase : Any = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __a )
def lowerCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
__lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def lowerCAmelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
def check_hidden_states_output(__a : List[Any] , __a : Tuple , __a : List[str] ):
__lowercase : Optional[Any] = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
__lowercase : List[Any] = model(**self._prepare_for_class(__a , __a ) )
__lowercase : Tuple = outputs.hidden_states
__lowercase : str = 16
self.assertEqual(len(__a ) , __a )
__lowercase , __lowercase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase : Any = True
check_hidden_states_output(__a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase : Union[str, Any] = True
check_hidden_states_output(__a , __a , __a )
def lowerCAmelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__lowercase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
def lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
__lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__a )
@slow
def lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase : Optional[int] = MobileNetVaModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
return (
MobileNetVaImageProcessor.from_pretrained("""google/mobilenet_v2_1.0_224""" ) if is_vision_available() else None
)
@slow
def lowerCAmelCase ( self : str ) -> int:
"""simple docstring"""
__lowercase : Tuple = MobileNetVaForImageClassification.from_pretrained("""google/mobilenet_v2_1.0_224""" ).to(__a )
__lowercase : str = self.default_image_processor
__lowercase : Tuple = prepare_img()
__lowercase : Tuple = image_processor(images=__a , return_tensors="""pt""" ).to(__a )
# forward pass
with torch.no_grad():
__lowercase : str = model(**__a )
# verify the logits
__lowercase : Union[str, Any] = torch.Size((1, 1001) )
self.assertEqual(outputs.logits.shape , __a )
__lowercase : str = torch.tensor([0.2445, -1.1993, 0.1905] ).to(__a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1E-4 ) )
@slow
def lowerCAmelCase ( self : Tuple ) -> Any:
"""simple docstring"""
__lowercase : int = MobileNetVaForSemanticSegmentation.from_pretrained("""google/deeplabv3_mobilenet_v2_1.0_513""" )
__lowercase : Dict = model.to(__a )
__lowercase : Tuple = MobileNetVaImageProcessor.from_pretrained("""google/deeplabv3_mobilenet_v2_1.0_513""" )
__lowercase : List[str] = prepare_img()
__lowercase : Optional[int] = image_processor(images=__a , return_tensors="""pt""" ).to(__a )
# forward pass
with torch.no_grad():
__lowercase : Union[str, Any] = model(**__a )
__lowercase : Any = outputs.logits
# verify the logits
__lowercase : Dict = torch.Size((1, 21, 65, 65) )
self.assertEqual(logits.shape , __a )
__lowercase : str = torch.tensor(
[
[[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
[[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
[[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
] , device=__a , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __a , atol=1E-4 ) )
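if __name__ == "__main__":
    # Standalone sketch of the inference flow that the slow tests above
    # exercise, via the public transformers API (requires network access to
    # download the Hub checkpoint; the fixture image path is the one used by
    # prepare_img above).
    from PIL import Image
    from transformers import MobileNetV2ForImageClassification, MobileNetV2ImageProcessor

    processor = MobileNetV2ImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224")
    model = MobileNetV2ForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224")
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    inputs = processor(images=image, return_tensors="pt")
    logits = model(**inputs).logits  # shape (1, 1001)
    print(model.config.id2label[logits.argmax(-1).item()])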
| 649
| 1
|
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
torch_version = parse(importlib.metadata.version("torch"))
def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str):
    """Compare a library's installed version (or a given `Version`) against a requirement string."""
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(F"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys())}, received {operation}")
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return operation(library_or_version, parse(requirement_version))
def is_torch_version(operation: str, version: str):
    """Compare the currently installed torch version against a reference version string."""
    return compare_versions(torch_version, operation, version)
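# Usage sketch for the helpers above (the version strings are illustrative):
#
#     is_torch_version(">=", "1.12.0")           # compare the installed torch
#     compare_versions("numpy", ">=", "1.20.0")  # compare any installed library
#
# Both return a bool; `operation` must be a key of STR_OPERATION_TO_FUNC,
# e.g. "<", "<=", "==", "!=", ">=", ">".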
| 649
|
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm(main_process_only: bool = True, *args, **kwargs):
    if not is_tqdm_available():
        raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.")
    disable = False
    if main_process_only:
        # render the bar only on the local main process; disable it everywhere else
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
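# Usage sketch (illustrative); note that `main_process_only` is the first
# positional argument in this version of the wrapper:
#
#     from accelerate.utils import tqdm
#     for batch in tqdm(True, dataloader):   # bar rendered on the local main process only
#         ...
#     for batch in tqdm(False, dataloader):  # bar rendered on every process
#         ...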
| 649
| 1
|
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
lowerCamelCase : Dict = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
lowerCamelCase : str = {
# fairseq:
'''wmt19-ru-en''': {'''length_penalty''': 1.1},
'''wmt19-en-ru''': {'''length_penalty''': 1.15},
'''wmt19-en-de''': {'''length_penalty''': 1.0},
'''wmt19-de-en''': {'''length_penalty''': 1.1},
# allenai:
'''wmt16-en-de-dist-12-1''': {'''length_penalty''': 0.6},
'''wmt16-en-de-dist-6-1''': {'''length_penalty''': 0.6},
'''wmt16-en-de-12-1''': {'''length_penalty''': 0.8},
'''wmt19-de-en-6-6-base''': {'''length_penalty''': 0.6},
'''wmt19-de-en-6-6-big''': {'''length_penalty''': 0.6},
}
# this remaps the different models to their organization names
lowerCamelCase : Tuple = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
lowerCamelCase : str = '''facebook'''
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
lowerCamelCase : Optional[Any] = '''allenai'''
def rewrite_dict_keys(d):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[F"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
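# Illustrative example of the rewrite above (the four special tokens must be
# present in the input so that they can be restored):
#
#     rewrite_dict_keys({"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "le@@": 5, "er": 7})
#     => {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "le": 5, "er</w>": 7}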
def convert_fsmt_checkpoint_to_pytorch(fsmt_checkpoint_path, pytorch_dump_folder_path):
# prep
assert os.path.exists(lowerCAmelCase_ )
os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_ )
print(F"Writing results to {pytorch_dump_folder_path}" )
# handle various types of models
__lowercase : Tuple = basename(lowerCAmelCase_ )
__lowercase : str = dirname(lowerCAmelCase_ )
__lowercase : Any = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
__lowercase : Optional[Any] = cls.hub_models()
__lowercase : Any = {"""bpe""": """fastbpe""", """tokenizer""": """moses"""}
__lowercase : List[str] = """."""
# note: since the model dump is old, fairseq has upgraded its model some
# time later, and it does a whole lot of rewrites and splits on the saved
# weights, therefore we can't use torch.load() directly on the model file.
# see: upgrade_state_dict(state_dict) in fairseq_model.py
print(F"using checkpoint {checkpoint_file}" )
__lowercase : List[Any] = hub_utils.from_pretrained(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , archive_map=lowerCAmelCase_ , **lowerCAmelCase_ )
__lowercase : Union[str, Any] = vars(chkpt["""args"""]["""model"""] )
__lowercase : Optional[int] = args["""source_lang"""]
__lowercase : Tuple = args["""target_lang"""]
__lowercase : Optional[int] = dirname(lowerCAmelCase_ )
__lowercase : List[str] = basename(lowerCAmelCase_ )
# dicts
__lowercase : List[Any] = os.path.join(lowerCAmelCase_ , F"dict.{src_lang}.txt" )
__lowercase : Optional[Any] = os.path.join(lowerCAmelCase_ , F"dict.{tgt_lang}.txt" )
__lowercase : List[str] = Dictionary.load(lowerCAmelCase_ )
__lowercase : Tuple = rewrite_dict_keys(src_dict.indices )
__lowercase : int = len(lowerCAmelCase_ )
__lowercase : List[Any] = os.path.join(lowerCAmelCase_ , """vocab-src.json""" )
print(F"Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records" )
with open(lowerCAmelCase_ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(lowerCAmelCase_ , ensure_ascii=lowerCAmelCase_ , indent=lowerCAmelCase_ ) )
# detect whether this is a do_lower_case situation, which can be derived by checking whether we
# have at least one uppercase letter in the source vocab
__lowercase : Optional[Any] = True
for k in src_vocab.keys():
if not k.islower():
__lowercase : Any = False
break
__lowercase : str = Dictionary.load(lowerCAmelCase_ )
__lowercase : List[Any] = rewrite_dict_keys(tgt_dict.indices )
__lowercase : Dict = len(lowerCAmelCase_ )
__lowercase : Optional[int] = os.path.join(lowerCAmelCase_ , """vocab-tgt.json""" )
print(F"Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records" )
with open(lowerCAmelCase_ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(lowerCAmelCase_ , ensure_ascii=lowerCAmelCase_ , indent=lowerCAmelCase_ ) )
# merges_file (bpecodes)
__lowercase : List[str] = os.path.join(lowerCAmelCase_ , VOCAB_FILES_NAMES["""merges_file"""] )
for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
__lowercase : Tuple = os.path.join(lowerCAmelCase_ , lowerCAmelCase_ )
if os.path.exists(lowerCAmelCase_ ):
break
with open(lowerCAmelCase_ , encoding="""utf-8""" ) as fin:
__lowercase : Optional[Any] = fin.read()
__lowercase : str = re.sub(r""" \d+$""" , """""" , lowerCAmelCase_ , 0 , re.M ) # remove frequency number
print(F"Generating {merges_file}" )
with open(lowerCAmelCase_ , """w""" , encoding="""utf-8""" ) as fout:
fout.write(lowerCAmelCase_ )
# model config
__lowercase : int = os.path.join(lowerCAmelCase_ , """config.json""" )
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", F"need to extend tokenizer to support bpe={args['bpe']}"
assert args["tokenizer"] == "moses", F"need to extend tokenizer to support bpe={args['tokenizer']}"
__lowercase : Any = {
"""architectures""": ["""FSMTForConditionalGeneration"""],
"""model_type""": """fsmt""",
"""activation_dropout""": args["""activation_dropout"""],
"""activation_function""": """relu""",
"""attention_dropout""": args["""attention_dropout"""],
"""d_model""": args["""decoder_embed_dim"""],
"""dropout""": args["""dropout"""],
"""init_std""": 0.02,
"""max_position_embeddings""": args["""max_source_positions"""],
"""num_hidden_layers""": args["""encoder_layers"""],
"""src_vocab_size""": src_vocab_size,
"""tgt_vocab_size""": tgt_vocab_size,
"""langs""": [src_lang, tgt_lang],
"""encoder_attention_heads""": args["""encoder_attention_heads"""],
"""encoder_ffn_dim""": args["""encoder_ffn_embed_dim"""],
"""encoder_layerdrop""": args["""encoder_layerdrop"""],
"""encoder_layers""": args["""encoder_layers"""],
"""decoder_attention_heads""": args["""decoder_attention_heads"""],
"""decoder_ffn_dim""": args["""decoder_ffn_embed_dim"""],
"""decoder_layerdrop""": args["""decoder_layerdrop"""],
"""decoder_layers""": args["""decoder_layers"""],
"""bos_token_id""": 0,
"""pad_token_id""": 1,
"""eos_token_id""": 2,
"""is_encoder_decoder""": True,
"""scale_embedding""": not args["""no_scale_embedding"""],
"""tie_word_embeddings""": args["""share_all_embeddings"""],
}
# good hparam defaults to start with
__lowercase : Any = 5
__lowercase : List[str] = False
if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
__lowercase : List[Any] = best_score_hparams[model_dir]["""length_penalty"""]
else:
__lowercase : Optional[Any] = 1.0
print(F"Generating {fsmt_model_config_file}" )
with open(lowerCAmelCase_ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(lowerCAmelCase_ , ensure_ascii=lowerCAmelCase_ , indent=lowerCAmelCase_ ) )
# tokenizer config
__lowercase : Dict = os.path.join(lowerCAmelCase_ , lowerCAmelCase_ )
__lowercase : List[str] = {
"""langs""": [src_lang, tgt_lang],
"""model_max_length""": 1024,
"""do_lower_case""": do_lower_case,
}
print(F"Generating {fsmt_tokenizer_config_file}" )
with open(lowerCAmelCase_ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(lowerCAmelCase_ , ensure_ascii=lowerCAmelCase_ , indent=lowerCAmelCase_ ) )
# model
__lowercase : List[Any] = chkpt["""models"""][0]
__lowercase : Optional[Any] = model.state_dict()
# rename keys to start with 'model.'
__lowercase : int = OrderedDict(("""model.""" + k, v) for k, v in model_state_dict.items() )
# remove unneeded keys
__lowercase : Union[str, Any] = [
"""model.model""",
"""model.encoder.version""",
"""model.decoder.version""",
"""model.encoder_embed_tokens.weight""",
"""model.decoder_embed_tokens.weight""",
"""model.encoder.embed_positions._float_tensor""",
"""model.decoder.embed_positions._float_tensor""",
]
for k in ignore_keys:
model_state_dict.pop(lowerCAmelCase_ , lowerCAmelCase_ )
__lowercase : Optional[int] = FSMTConfig.from_pretrained(lowerCAmelCase_ )
__lowercase : List[Any] = FSMTForConditionalGeneration(lowerCAmelCase_ )
# check that it loads ok
model_new.load_state_dict(lowerCAmelCase_ , strict=lowerCAmelCase_ )
# save
__lowercase : int = os.path.join(lowerCAmelCase_ , lowerCAmelCase_ )
print(F"Generating {pytorch_weights_dump_path}" )
torch.save(lowerCAmelCase_ , lowerCAmelCase_ )
print("""Conversion is done!""" )
print("""\nLast step is to upload the files to s3""" )
print(F"cd {data_root}" )
print(F"transformers-cli upload {model_dir}" )
if __name__ == "__main__":
lowerCamelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--fsmt_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'''
''' bpecodes, etc.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowerCamelCase : Dict = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
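# Example invocation (the script filename and paths are illustrative; the
# checkpoint directory must contain the fairseq dump together with its dicts
# and bpecodes, as described in the comments at the top of this file):
#
#   python convert_fsmt_original_pytorch_checkpoint_to_pytorch.py \
#       --fsmt_checkpoint_path ./data/wmt19-ru-en/model4.pt \
#       --pytorch_dump_folder_path ./converted/wmt19-ru-en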
| 649
|
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """Return the maximum sum of non-adjacent elements of `nums`.

    >>> maximum_non_adjacent_sum([1, 2, 3])
    4
    >>> maximum_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6])
    18
    """
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 649
| 1
|
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def snake_case_ ( lowerCAmelCase_ : int ):
random.seed(lowerCAmelCase_ )
np.random.seed(lowerCAmelCase_ )
torch.manual_seed(lowerCAmelCase_ )
torch.cuda.manual_seed_all(lowerCAmelCase_ )
# ^^ safe to call this function even if cuda is not available
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : Dict , __a : Iterable[torch.nn.Parameter] , __a : float = 0.9999 , __a : float = 0.0 , __a : int = 0 , __a : bool = False , __a : Union[float, int] = 1.0 , __a : Union[float, int] = 2 / 3 , __a : Optional[Any] = None , __a : Dict[str, Any] = None , **__a : Optional[Any] , ) -> int:
"""simple docstring"""
if isinstance(__a , torch.nn.Module ):
__lowercase : Tuple = (
"""Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. """
"""Please pass the parameters of the module instead."""
)
deprecate(
"""passing a `torch.nn.Module` to `ExponentialMovingAverage`""" , """1.0.0""" , __a , standard_warn=__a , )
__lowercase : Dict = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
__lowercase : str = True
if kwargs.get("""max_value""" , __a ) is not None:
__lowercase : Optional[int] = """The `max_value` argument is deprecated. Please use `decay` instead."""
deprecate("""max_value""" , """1.0.0""" , __a , standard_warn=__a )
__lowercase : Any = kwargs["""max_value"""]
if kwargs.get("""min_value""" , __a ) is not None:
__lowercase : List[str] = """The `min_value` argument is deprecated. Please use `min_decay` instead."""
deprecate("""min_value""" , """1.0.0""" , __a , standard_warn=__a )
__lowercase : Any = kwargs["""min_value"""]
__lowercase : Any = list(__a )
__lowercase : Any = [p.clone().detach() for p in parameters]
if kwargs.get("""device""" , __a ) is not None:
__lowercase : Tuple = """The `device` argument is deprecated. Please use `to` instead."""
deprecate("""device""" , """1.0.0""" , __a , standard_warn=__a )
self.to(device=kwargs["""device"""] )
__lowercase : Optional[Any] = None
__lowercase : str = decay
__lowercase : Union[str, Any] = min_decay
__lowercase : int = update_after_step
__lowercase : Any = use_ema_warmup
__lowercase : List[str] = inv_gamma
__lowercase : Dict = power
__lowercase : str = 0
__lowercase : Optional[int] = None # set in `step()`
__lowercase : Any = model_cls
__lowercase : Optional[Any] = model_config
@classmethod
def lowerCAmelCase ( cls : List[str] , __a : int , __a : Any ) -> "EMAModel":
"""simple docstring"""
__lowercase , __lowercase : str = model_cls.load_config(__a , return_unused_kwargs=__a )
__lowercase : List[Any] = model_cls.from_pretrained(__a )
__lowercase : Union[str, Any] = cls(model.parameters() , model_cls=__a , model_config=model.config )
ema_model.load_state_dict(__a )
return ema_model
def lowerCAmelCase ( self : Optional[int] , __a : str ) -> str:
"""simple docstring"""
if self.model_cls is None:
raise ValueError("""`save_pretrained` can only be used if `model_cls` was defined at __init__.""" )
if self.model_config is None:
raise ValueError("""`save_pretrained` can only be used if `model_config` was defined at __init__.""" )
__lowercase : List[Any] = self.model_cls.from_config(self.model_config )
__lowercase : Optional[int] = self.state_dict()
state_dict.pop("""shadow_params""" , __a )
model.register_to_config(**__a )
self.copy_to(model.parameters() )
model.save_pretrained(__a )
def lowerCAmelCase ( self : Union[str, Any] , __a : int ) -> float:
"""simple docstring"""
__lowercase : Optional[Any] = max(0 , optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
if self.use_ema_warmup:
__lowercase : Tuple = 1 - (1 + step / self.inv_gamma) ** -self.power
else:
__lowercase : Optional[int] = (1 + step) / (10 + step)
__lowercase : int = min(__a , self.decay )
# make sure decay is not smaller than min_decay
__lowercase : Optional[Any] = max(__a , self.min_decay )
return cur_decay_value
@torch.no_grad()
def lowerCAmelCase ( self : List[Any] , __a : Iterable[torch.nn.Parameter] ) -> List[Any]:
"""simple docstring"""
if isinstance(__a , torch.nn.Module ):
__lowercase : Tuple = (
"""Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. """
"""Please pass the parameters of the module instead."""
)
deprecate(
"""passing a `torch.nn.Module` to `ExponentialMovingAverage.step`""" , """1.0.0""" , __a , standard_warn=__a , )
__lowercase : Union[str, Any] = parameters.parameters()
__lowercase : Any = list(__a )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
__lowercase : Union[str, Any] = self.get_decay(self.optimization_step )
__lowercase : List[Any] = decay
__lowercase : List[str] = 1 - decay
__lowercase : Any = contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params , __a ):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
__lowercase : Optional[int] = deepspeed.zero.GatheredParameters(__a , modifier_rank=__a )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(__a )
def lowerCAmelCase ( self : Union[str, Any] , __a : Iterable[torch.nn.Parameter] ) -> None:
"""simple docstring"""
__lowercase : Tuple = list(__a )
for s_param, param in zip(self.shadow_params , __a ):
param.data.copy_(s_param.to(param.device ).data )
def lowerCAmelCase ( self : Any , __a : List[Any]=None , __a : List[str]=None ) -> None:
"""simple docstring"""
__lowercase : List[str] = [
p.to(device=__a , dtype=__a ) if p.is_floating_point() else p.to(device=__a )
for p in self.shadow_params
]
def lowerCAmelCase ( self : Tuple ) -> dict:
"""simple docstring"""
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def lowerCAmelCase ( self : Union[str, Any] , __a : Iterable[torch.nn.Parameter] ) -> None:
"""simple docstring"""
__lowercase : int = [param.detach().cpu().clone() for param in parameters]
def lowerCAmelCase ( self : List[Any] , __a : Iterable[torch.nn.Parameter] ) -> None:
"""simple docstring"""
if self.temp_stored_params is None:
raise RuntimeError("""This ExponentialMovingAverage has no `store()`ed weights """ """to `restore()`""" )
for c_param, param in zip(self.temp_stored_params , __a ):
param.data.copy_(c_param.data )
# Better memory-wise.
__lowercase : Union[str, Any] = None
def lowerCAmelCase ( self : List[Any] , __a : dict ) -> None:
"""simple docstring"""
__lowercase : Union[str, Any] = copy.deepcopy(__a )
__lowercase : List[str] = state_dict.get("""decay""" , self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError("""Decay must be between 0 and 1""" )
__lowercase : int = state_dict.get("""min_decay""" , self.min_decay )
if not isinstance(self.min_decay , __a ):
raise ValueError("""Invalid min_decay""" )
__lowercase : Tuple = state_dict.get("""optimization_step""" , self.optimization_step )
if not isinstance(self.optimization_step , __a ):
raise ValueError("""Invalid optimization_step""" )
__lowercase : int = state_dict.get("""update_after_step""" , self.update_after_step )
if not isinstance(self.update_after_step , __a ):
raise ValueError("""Invalid update_after_step""" )
__lowercase : Tuple = state_dict.get("""use_ema_warmup""" , self.use_ema_warmup )
if not isinstance(self.use_ema_warmup , __a ):
raise ValueError("""Invalid use_ema_warmup""" )
__lowercase : Dict = state_dict.get("""inv_gamma""" , self.inv_gamma )
if not isinstance(self.inv_gamma , (float, int) ):
raise ValueError("""Invalid inv_gamma""" )
__lowercase : str = state_dict.get("""power""" , self.power )
if not isinstance(self.power , (float, int) ):
raise ValueError("""Invalid power""" )
__lowercase : Dict = state_dict.get("""shadow_params""" , __a )
if shadow_params is not None:
__lowercase : int = shadow_params
if not isinstance(self.shadow_params , __a ):
raise ValueError("""shadow_params must be a list""" )
if not all(isinstance(__a , torch.Tensor ) for p in self.shadow_params ):
raise ValueError("""shadow_params must all be Tensors""" )
| 649
|
lowerCamelCase : List[str] = '''0.18.2'''
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
| 649
| 1
|
def get_highest_set_bit_position(number: int) -> int:
    """Return the 1-based position of the highest set bit of a non-negative integer.

    >>> get_highest_set_bit_position(25)
    5
    >>> get_highest_set_bit_position(0)
    0
    """
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    position = 0
    while number:
        position += 1
        number >>= 1
    return position
if __name__ == "__main__":
import doctest
doctest.testmod()
| 649
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase : List[Any] = logging.get_logger(__name__)
def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    prefix = "backbone." if is_semantic else ""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"{prefix}blocks.{i}.norm1.weight", F"beit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"{prefix}blocks.{i}.norm1.bias", F"beit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"{prefix}blocks.{i}.attn.proj.weight", F"beit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(F"{prefix}blocks.{i}.attn.proj.bias", F"beit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"{prefix}blocks.{i}.norm2.weight", F"beit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"{prefix}blocks.{i}.norm2.bias", F"beit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc1.weight", F"beit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc1.bias", F"beit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc2.weight", F"beit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc2.bias", F"beit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
(F"{prefix}cls_token", """beit.embeddings.cls_token"""),
(F"{prefix}patch_embed.proj.weight", """beit.embeddings.patch_embeddings.projection.weight"""),
(F"{prefix}patch_embed.proj.bias", """beit.embeddings.patch_embeddings.projection.bias"""),
(F"{prefix}pos_embed", """beit.embeddings.position_embeddings"""),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("""mask_token""", """beit.embeddings.mask_token"""),
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("""fc_norm.weight""", """beit.pooler.layernorm.weight"""),
("""fc_norm.bias""", """beit.pooler.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values
        in_proj_weight = state_dict.pop(F"{prefix}blocks.{i}.attn.qkv.weight")
        q_bias = state_dict.pop(F"{prefix}blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(F"{prefix}blocks.{i}.attn.v_bias")
        state_dict[F"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
        state_dict[F"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias
        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(F"{prefix}blocks.{i}.gamma_1")
        gamma_2 = state_dict.pop(F"{prefix}blocks.{i}.gamma_2")
        state_dict[F"beit.encoder.layer.{i}.lambda_1"] = gamma_1
        state_dict[F"beit.encoder.layer.{i}.lambda_2"] = gamma_2
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
__lowercase : Dict = False if """rvlcdip""" in checkpoint_url else True
__lowercase : Tuple = BeitConfig(use_absolute_position_embeddings=lowerCAmelCase_ , use_mask_token=lowerCAmelCase_ )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
__lowercase : Union[str, Any] = 1024
__lowercase : Optional[int] = 4096
__lowercase : List[Any] = 24
__lowercase : Dict = 16
# labels
if "rvlcdip" in checkpoint_url:
__lowercase : Optional[int] = 16
__lowercase : Any = """huggingface/label-files"""
__lowercase : Union[str, Any] = """rvlcdip-id2label.json"""
__lowercase : List[str] = json.load(open(hf_hub_download(lowerCAmelCase_ , lowerCAmelCase_ , repo_type="""dataset""" ) , """r""" ) )
__lowercase : Optional[int] = {int(lowerCAmelCase_ ): v for k, v in idalabel.items()}
__lowercase : Union[str, Any] = idalabel
__lowercase : Optional[Any] = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
__lowercase : Optional[int] = torch.hub.load_state_dict_from_url(lowerCAmelCase_ , map_location="""cpu""" )["""model"""]
__lowercase : Union[str, Any] = create_rename_keys(lowerCAmelCase_ , has_lm_head=lowerCAmelCase_ )
for src, dest in rename_keys:
rename_key(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
read_in_q_k_v(lowerCAmelCase_ , lowerCAmelCase_ , has_lm_head=lowerCAmelCase_ )
# load HuggingFace model
__lowercase : Dict = BeitForMaskedImageModeling(lowerCAmelCase_ ) if has_lm_head else BeitForImageClassification(lowerCAmelCase_ )
model.eval()
model.load_state_dict(lowerCAmelCase_ )
# Check outputs on an image
__lowercase : List[str] = BeitImageProcessor(
size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=lowerCAmelCase_ )
__lowercase : List[str] = prepare_img()
__lowercase : Optional[Any] = image_processor(images=lowerCAmelCase_ , return_tensors="""pt""" )
__lowercase : Optional[int] = encoding["""pixel_values"""]
__lowercase : str = model(lowerCAmelCase_ )
__lowercase : Tuple = outputs.logits
# verify logits
__lowercase : str = [1, 16] if """rvlcdip""" in checkpoint_url else [1, 196, 8192]
assert logits.shape == torch.Size(lowerCAmelCase_ ), "Shape of logits not as expected"
Path(lowerCAmelCase_ ).mkdir(exist_ok=lowerCAmelCase_ )
print(F"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(lowerCAmelCase_ )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(lowerCAmelCase_ )
if push_to_hub:
if has_lm_head:
__lowercase : Optional[Any] = """dit-base""" if """base""" in checkpoint_url else """dit-large"""
else:
__lowercase : Tuple = """dit-base-finetuned-rvlcdip""" if """dit-b""" in checkpoint_url else """dit-large-finetuned-rvlcdip"""
image_processor.push_to_hub(
repo_path_or_name=Path(lowerCAmelCase_ , lowerCAmelCase_ ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=lowerCAmelCase_ , )
model.push_to_hub(
repo_path_or_name=Path(lowerCAmelCase_ , lowerCAmelCase_ ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=lowerCAmelCase_ , )
if __name__ == "__main__":
lowerCamelCase : List[str] = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
lowerCamelCase : List[str] = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
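# Example invocation (script filename and output path are illustrative; the
# default --checkpoint_url converts the base DiT checkpoint):
#
#   python convert_dit_unilm_to_pytorch.py \
#       --checkpoint_url https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth \
#       --pytorch_dump_folder_path ./dit-base \
#       --push_to_hub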
| 649
| 1
|
def different_signs(num1: int, num2: int) -> bool:
    """Return True if the two integers have opposite signs (checked via the XOR sign bit).

    >>> different_signs(1, -1)
    True
    >>> different_signs(1, 1)
    False
    """
    return (num1 ^ num2) < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 649
|
from torch import nn
class ClassificationHead(nn.Module):
    """Classification head mapping `embed_size`-dim hidden states to `class_size` logits."""
    def __init__(self, class_size: int, embed_size: int):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size, class_size)
    def forward(self, hidden_state):
        logits = self.mlp(hidden_state)
        return logits
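if __name__ == "__main__":
    # Quick smoke test (shapes are illustrative): project a batch of two
    # 768-dim hidden states down to logits over 4 classes.
    import torch

    head = ClassificationHead(class_size=4, embed_size=768)
    hidden_states = torch.randn(2, 768)
    print(head(hidden_states).shape)  # torch.Size([2, 4])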
| 649
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : List[str] = logging.get_logger(__name__)
lowerCamelCase : Any = {
'''google/pegasus-large''': '''https://huggingface.co/google/pegasus-large/resolve/main/config.json''',
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class lowerCAmelCase ( __a ):
'''simple docstring'''
_A : Optional[Any] = '''pegasus'''
_A : int = ['''past_key_values''']
_A : Optional[Any] = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self : Optional[int] , __a : List[str]=50265 , __a : Optional[int]=1024 , __a : Dict=12 , __a : Dict=4096 , __a : Any=16 , __a : Dict=12 , __a : Optional[int]=4096 , __a : List[str]=16 , __a : Optional[Any]=0.0 , __a : List[str]=0.0 , __a : Union[str, Any]=True , __a : List[Any]=True , __a : Optional[Any]="gelu" , __a : Any=1024 , __a : str=0.1 , __a : int=0.0 , __a : Union[str, Any]=0.0 , __a : Dict=0.02 , __a : Dict=0 , __a : Optional[int]=False , __a : Dict=0 , __a : str=1 , __a : Any=1 , **__a : Union[str, Any] , ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = vocab_size
__lowercase : List[Any] = max_position_embeddings
__lowercase : Dict = d_model
__lowercase : Any = encoder_ffn_dim
__lowercase : Union[str, Any] = encoder_layers
__lowercase : Any = encoder_attention_heads
__lowercase : Optional[Any] = decoder_ffn_dim
__lowercase : str = decoder_layers
__lowercase : Optional[int] = decoder_attention_heads
__lowercase : Any = dropout
__lowercase : Any = attention_dropout
__lowercase : int = activation_dropout
__lowercase : List[Any] = activation_function
__lowercase : List[Any] = init_std
__lowercase : Any = encoder_layerdrop
__lowercase : Optional[Any] = decoder_layerdrop
__lowercase : str = use_cache
__lowercase : Dict = encoder_layers
__lowercase : int = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=__a , eos_token_id=__a , is_encoder_decoder=__a , decoder_start_token_id=__a , forced_eos_token_id=__a , **__a , )
@property
def lowerCAmelCase ( self : Tuple ) -> int:
"""simple docstring"""
return self.encoder_attention_heads
@property
def lowerCAmelCase ( self : str ) -> int:
"""simple docstring"""
return self.d_model
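# Usage sketch, using the upstream class name `PegasusConfig` (the values are
# illustrative): the attribute_map defined above aliases the generic names.
#
#     config = PegasusConfig(d_model=512, encoder_layers=6, decoder_layers=6)
#     config.hidden_size           # -> 512, alias of d_model
#     config.num_attention_heads   # -> alias of encoder_attention_heads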
| 649
|
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **rouge_kwargs):
    """Extra kwargs are passed through to `calculate_rouge`."""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **rouge_kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
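# Example invocation via `fire` (file names are illustrative): positional
# arguments map to pred_path/tgt_path, flags map to the keyword arguments.
#
#   python rouge_cli.py preds.txt targets.txt --save_path=rouge.json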
| 649
| 1
|
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
lowerCamelCase : int = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
lowerCamelCase : List[Any] = 25_00_04
lowerCamelCase : int = 25_00_20
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase ( __a , unittest.TestCase ):
'''simple docstring'''
_A : Optional[int] = MBartaaTokenizer
_A : Tuple = MBartaaTokenizerFast
_A : Optional[int] = True
_A : List[str] = True
def lowerCAmelCase ( self : int ) -> Optional[int]:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
__lowercase : Union[str, Any] = MBartaaTokenizer(__a , src_lang="""en_XX""" , tgt_lang="""ro_RO""" , keep_accents=__a )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
__lowercase : Union[str, Any] = """<s>"""
__lowercase : List[Any] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a ) , __a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a ) , __a )
def lowerCAmelCase ( self : str ) -> Optional[Any]:
"""simple docstring"""
__lowercase : str = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(__a ) , 1054 )
def lowerCAmelCase ( self : Any ) -> str:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1054 )
def lowerCAmelCase ( self : int ) -> str:
"""simple docstring"""
__lowercase : str = MBartaaTokenizer(__a , src_lang="""en_XX""" , tgt_lang="""ro_RO""" , keep_accents=__a )
__lowercase : List[str] = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__a , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__a ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
__lowercase : Any = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__a , [SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """."""] , )
__lowercase : Union[str, Any] = tokenizer.convert_tokens_to_ids(__a )
self.assertListEqual(
__a , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
__lowercase : int = tokenizer.convert_ids_to_tokens(__a )
self.assertListEqual(
__a , [SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """."""] , )
@slow
def lowerCAmelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
__lowercase : Optional[int] = {"""input_ids""": [[250004, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [250004, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [250004, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
self.tokenizer_integration_test_util(
expected_encoding=__a , model_name="""facebook/mbart-large-50""" , revision="""d3913889c59cd5c9e456b269c376325eabad57e2""" , )
def lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
__lowercase : Tuple = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart50""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
__lowercase : int = self.rust_tokenizer_class.from_pretrained(__a , **__a )
__lowercase : Union[str, Any] = self.tokenizer_class.from_pretrained(__a , **__a )
__lowercase : Tuple = tempfile.mkdtemp()
__lowercase : Dict = tokenizer_r.save_pretrained(__a )
__lowercase : Union[str, Any] = tokenizer_p.save_pretrained(__a )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
__lowercase : Tuple = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(__a , __a )
# Checks everything loads correctly in the same way
__lowercase : Any = tokenizer_r.from_pretrained(__a )
__lowercase : Any = tokenizer_p.from_pretrained(__a )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__a , __a ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(__a )
# Save tokenizer rust, legacy_format=True
__lowercase : Dict = tempfile.mkdtemp()
__lowercase : int = tokenizer_r.save_pretrained(__a , legacy_format=__a )
__lowercase : Any = tokenizer_p.save_pretrained(__a )
# Checks it save with the same files
self.assertSequenceEqual(__a , __a )
# Checks everything loads correctly in the same way
__lowercase : Optional[int] = tokenizer_r.from_pretrained(__a )
__lowercase : Dict = tokenizer_p.from_pretrained(__a )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__a , __a ) )
shutil.rmtree(__a )
# Save tokenizer rust, legacy_format=False
__lowercase : Dict = tempfile.mkdtemp()
__lowercase : int = tokenizer_r.save_pretrained(__a , legacy_format=__a )
__lowercase : Optional[int] = tokenizer_p.save_pretrained(__a )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
__lowercase : Optional[Any] = tokenizer_r.from_pretrained(__a )
__lowercase : List[Any] = tokenizer_p.from_pretrained(__a )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__a , __a ) )
shutil.rmtree(__a )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
_A : Optional[int] = '''facebook/mbart-large-50-one-to-many-mmt'''
_A : str = [
''' UN Chief Says There Is No Military Solution in Syria''',
''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''',
]
_A : str = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
'''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'''
''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'''
''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
]
_A : Any = [EN_CODE, 8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2]
@classmethod
def lowerCAmelCase ( cls : Any ) -> List[str]:
"""simple docstring"""
__lowercase : MBartaaTokenizer = MBartaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="""en_XX""" , tgt_lang="""ro_RO""" )
__lowercase : Tuple = 1
return cls
def lowerCAmelCase ( self : int ) -> Any:
"""simple docstring"""
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] , 250001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""] , 250004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] , 250020 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""mr_IN"""] , 250038 )
def lowerCAmelCase ( self : List[Any] ) -> str:
"""simple docstring"""
__lowercase : Dict = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __a )
def lowerCAmelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
self.assertIn(__a , self.tokenizer.all_special_ids )
__lowercase : List[Any] = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
__lowercase : Optional[Any] = self.tokenizer.decode(__a , skip_special_tokens=__a )
__lowercase : List[str] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__a )
self.assertEqual(__a , __a )
self.assertNotIn(self.tokenizer.eos_token , __a )
def lowerCAmelCase ( self : str ) -> List[Any]:
"""simple docstring"""
__lowercase : Any = ["""this is gunna be a long sentence """ * 20]
assert isinstance(src_text[0] , __a )
__lowercase : int = 10
__lowercase : Optional[Any] = self.tokenizer(__a , max_length=__a , truncation=__a ).input_ids[0]
self.assertEqual(ids[0] , __a )
self.assertEqual(ids[-1] , 2 )
self.assertEqual(len(__a ) , __a )
def lowerCAmelCase ( self : List[str] ) -> Any:
"""simple docstring"""
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [250053, 250001] )
def lowerCAmelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
__lowercase : List[str] = tempfile.mkdtemp()
__lowercase : Union[str, Any] = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__a )
__lowercase : List[Any] = MBartaaTokenizer.from_pretrained(__a )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __a )
@require_torch
def lowerCAmelCase ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : Optional[int] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__a , return_tensors="""pt""" )
__lowercase : Dict = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def lowerCAmelCase ( self : str ) -> List[Any]:
"""simple docstring"""
__lowercase : str = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=__a , truncation=__a , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
__lowercase : int = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
self.assertIsInstance(__a , __a )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
__lowercase : Optional[Any] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , __a )
self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def lowerCAmelCase ( self : Any ) -> Dict:
"""simple docstring"""
__lowercase : List[Any] = self.tokenizer(self.src_text , padding=__a , truncation=__a , max_length=3 , return_tensors="""pt""" )
__lowercase : str = self.tokenizer(
text_target=self.tgt_text , padding=__a , truncation=__a , max_length=10 , return_tensors="""pt""" )
__lowercase : List[str] = targets["""input_ids"""]
__lowercase : Optional[Any] = shift_tokens_right(__a , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def lowerCAmelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
__lowercase : Dict = self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""ar_AR""" )
self.assertEqual(
nested_simplify(__a ) , {
# en_XX, A, test, EOS
"""input_ids""": [[250004, 62, 3034, 2]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 250001,
} , )
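# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original test file): the MBart-50
# layout the assertions above exercise is "[src_lang_code] + tokens + [eos]"
# on the input side, with the target language id handed to generation as
# `forced_bos_token_id`. A minimal stand-in with toy ids:
def mbart50_inputs_sketch(token_ids, src_lang_id, tgt_lang_id, eos_id=2):
    """Mimics the prefix/suffix convention checked by the assertions above."""
    input_ids = [src_lang_id] + list(token_ids) + [eos_id]
    return {
        "input_ids": [input_ids],
        "attention_mask": [[1] * len(input_ids)],
        "forced_bos_token_id": tgt_lang_id,  # e.g. 250001 for ar_AR
    }
# mbart50_inputs_sketch([62, 3034], 250004, 250001) reproduces the
# {"input_ids": [[250004, 62, 3034, 2]], ...} structure asserted above.
# ---------------------------------------------------------------------------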
| 649
|
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def snake_case_ ( args : Dict ):
return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class lowerCAmelCase ( __a ):
'''simple docstring'''
@staticmethod
def lowerCAmelCase ( parser : ArgumentParser ) -> Optional[Any]:
"""simple docstring"""
download_parser = parser.add_parser("""download""" )
download_parser.add_argument(
"""--cache-dir""" , type=str , default=None , help="""Path to location to store the models""" )
download_parser.add_argument(
"""--force""" , action="""store_true""" , help="""Force the model to be downloaded even if it is already in cache-dir""" )
download_parser.add_argument(
"""--trust-remote-code""" , action="""store_true""" , help="""Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine""" , )
download_parser.add_argument("""model""" , type=str , help="""Name of the model to download""" )
download_parser.set_defaults(func=snake_case_ )
def __init__( self : Dict , model : str , cache : str , force : bool , trust_remote_code : bool ) -> Union[str, Any]:
"""simple docstring"""
self._model = model
self._cache = cache
self._force = force
self._trust_remote_code = trust_remote_code
def lowerCAmelCase ( self : str ) -> List[str]:
"""simple docstring"""
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
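# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original file): how the argparse
# wiring above maps CLI flags onto the command. The parser name below is a
# local stand-in; upstream, this subcommand is registered under the
# `transformers-cli` entry point.
if __name__ == "__main__":
    from argparse import ArgumentParser

    sketch_parser = ArgumentParser(prog="download-sketch")
    sketch_parser.add_argument("--cache-dir", type=str, default=None)
    sketch_parser.add_argument("--force", action="store_true")
    sketch_parser.add_argument("--trust-remote-code", action="store_true")
    sketch_parser.add_argument("model", type=str)
    args = sketch_parser.parse_args(["--force", "bert-base-uncased"])
    # Equivalent to: DownloadCommand("bert-base-uncased", None, True, False).run()
    assert args.model == "bert-base-uncased" and args.force is True
# ---------------------------------------------------------------------------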
| 649
| 1
|
from __future__ import annotations
def snake_case_ ( ciphertext : str , cipher_alphabet : list[str] | None = None , frequencies_dict : dict[str, float] | None = None , case_sensitive : bool = False , ):
alphabet_letters = cipher_alphabet or [chr(i ) for i in range(97 , 123 )]
# If the argument is None or the user provided an empty dictionary
if not frequencies_dict:
# Frequencies of letters in the english language (how much they show up)
frequencies = {
"""a""": 0.08_497,
"""b""": 0.01_492,
"""c""": 0.02_202,
"""d""": 0.04_253,
"""e""": 0.11_162,
"""f""": 0.02_228,
"""g""": 0.02_015,
"""h""": 0.06_094,
"""i""": 0.07_546,
"""j""": 0.00_153,
"""k""": 0.01_292,
"""l""": 0.04_025,
"""m""": 0.02_406,
"""n""": 0.06_749,
"""o""": 0.07_507,
"""p""": 0.01_929,
"""q""": 0.00_095,
"""r""": 0.07_587,
"""s""": 0.06_327,
"""t""": 0.09_356,
"""u""": 0.02_758,
"""v""": 0.00_978,
"""w""": 0.02_560,
"""x""": 0.00_150,
"""y""": 0.01_994,
"""z""": 0.00_077,
}
else:
# Custom frequencies dictionary
frequencies = frequencies_dict
if not case_sensitive:
ciphertext = ciphertext.lower()
# Chi squared statistic values
chi_squared_statistic_values: dict[int, tuple[float, str]] = {}
# cycle through all of the shifts
for shift in range(len(alphabet_letters ) ):
decrypted_with_shift = """"""
# decrypt the message with the shift
for letter in ciphertext:
try:
# Try to index the letter in the alphabet
new_key = (alphabet_letters.index(letter.lower() ) - shift) % len(
alphabet_letters )
decrypted_with_shift += (
alphabet_letters[new_key].upper()
if case_sensitive and letter.isupper()
else alphabet_letters[new_key]
)
except ValueError:
# Append the character if it isn't in the alphabet
decrypted_with_shift += letter
chi_squared_statistic = 0.0
# Loop through each letter in the decoded message with the shift
for letter in decrypted_with_shift:
if case_sensitive:
letter = letter.lower()
if letter in frequencies:
# Get the amount of times the letter occurs in the message
occurrences = decrypted_with_shift.lower().count(letter )
# Get the expected number of times the letter should appear based
# on letter frequencies
expected = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
chi_letter_value = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
else:
if letter.lower() in frequencies:
# Get the amount of times the letter occurs in the message
occurrences = decrypted_with_shift.count(letter )
# Get the expected number of times the letter should appear based
# on letter frequencies
expected = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
chi_letter_value = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
# Add the data to the chi_squared_statistic_values dictionary
chi_squared_statistic_values[shift] = (
chi_squared_statistic,
decrypted_with_shift,
)
# Get the most likely cipher by finding the cipher with the smallest chi squared
# statistic
def chi_squared_statistic_values_sorting_key(key : int ) -> tuple[float, str]:
return chi_squared_statistic_values[key]
most_likely_cipher = min(
chi_squared_statistic_values , key=chi_squared_statistic_values_sorting_key , )
# Get all the data from the most likely cipher (key, decoded message)
(
most_likely_cipher_chi_squared_value,
decoded_most_likely_cipher,
) = chi_squared_statistic_values[most_likely_cipher]
# Return the data on the most likely shift
return (
most_likely_cipher,
most_likely_cipher_chi_squared_value,
decoded_most_likely_cipher,
)
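# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file). `snake_case_`
# above decrypts a Caesar cipher by minimising the chi-squared statistic
#     chi^2 = sum over letters of (observed - expected)^2 / expected
# across all candidate shifts. "dszquphsbqiz" is "cryptography" shifted by 1,
# so the smallest chi-squared value should typically land on shift 1:
if __name__ == "__main__":
    shift, chi_squared, plaintext = snake_case_("dszquphsbqiz")
    print(shift, round(chi_squared, 3), plaintext)
# ---------------------------------------------------------------------------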
| 649
|
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
lowerCamelCase : Union[str, Any] = 1E-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : Dict , __a : List[str] , __a : Optional[int]=16 , __a : Optional[Any]=13 , __a : str=7 , __a : List[str]=14 , __a : Any=10 , __a : str=19 , __a : int=5 , __a : Any=4 , __a : List[Any]=True , __a : Tuple=16 , __a : Dict=2 , __a : Tuple=4 , __a : int=4 , __a : List[Any]="gelu" , __a : Tuple=0.1 , __a : List[str]=0.1 , __a : int=[1, 2, 3, 4, 5] , __a : str=25 , __a : Any=5 , ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Optional[Any] = d_model
__lowercase : Dict = parent
__lowercase : Tuple = batch_size
__lowercase : Optional[int] = prediction_length
__lowercase : List[str] = context_length
__lowercase : Any = cardinality
__lowercase : str = num_time_features
__lowercase : Optional[int] = lags_sequence
__lowercase : Optional[Any] = embedding_dimension
__lowercase : List[Any] = is_training
__lowercase : List[str] = hidden_size
__lowercase : int = num_hidden_layers
__lowercase : Any = num_attention_heads
__lowercase : List[Any] = intermediate_size
__lowercase : int = hidden_act
__lowercase : str = hidden_dropout_prob
__lowercase : List[Any] = attention_probs_dropout_prob
__lowercase : str = context_length
__lowercase : int = prediction_length + label_length
__lowercase : Union[str, Any] = label_length
__lowercase : Optional[int] = moving_average
__lowercase : Optional[Any] = autocorrelation_factor
def lowerCAmelCase ( self : str ) -> List[str]:
"""simple docstring"""
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def lowerCAmelCase ( self : Tuple , __a : str ) -> int:
"""simple docstring"""
__lowercase : Any = config.context_length + max(config.lags_sequence )
__lowercase : Any = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
__lowercase : Optional[int] = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
__lowercase : List[str] = floats_tensor([self.batch_size, _past_length] )
__lowercase : List[str] = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
__lowercase : Dict = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
__lowercase : str = floats_tensor([self.batch_size, config.prediction_length] )
__lowercase : List[str] = {
"""past_values""": past_values,
"""static_categorical_features""": static_categorical_features,
"""past_time_features""": past_time_features,
"""past_observed_mask""": past_observed_mask,
"""future_time_features""": future_time_features,
"""future_values""": future_values,
}
return inputs_dict
def lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Optional[Any] = self.get_config()
__lowercase : Any = self.prepare_autoformer_inputs_dict(__a )
return config, inputs_dict
def lowerCAmelCase ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase , __lowercase : List[str] = self.prepare_config_and_inputs()
return config, inputs_dict
def lowerCAmelCase ( self : Optional[Any] , __a : Tuple , __a : Optional[int] ) -> Any:
"""simple docstring"""
__lowercase : List[str] = AutoformerModel(config=__a ).to(__a ).eval()
__lowercase : Optional[int] = model(**__a )
__lowercase : Dict = outputs.encoder_last_hidden_state
__lowercase : Tuple = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
__lowercase : List[str] = model.get_encoder()
encoder.save_pretrained(__a )
__lowercase : List[str] = AutoformerEncoder.from_pretrained(__a ).to(__a )
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase : Any = model.create_network_inputs(**__a )
__lowercase , __lowercase : Any = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
__lowercase : Optional[Any] = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
__lowercase : Union[str, Any] = encoder(inputs_embeds=__a )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
__lowercase : str = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
__lowercase : Optional[int] = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
__lowercase : Any = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
__lowercase : Dict = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
__lowercase : Optional[Any] = model.get_decoder()
decoder.save_pretrained(__a )
__lowercase : Tuple = AutoformerDecoder.from_pretrained(__a ).to(__a )
__lowercase : str = decoder(
trend=__a , inputs_embeds=__a , encoder_hidden_states=__a , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class lowerCAmelCase ( __a , __a , unittest.TestCase ):
'''simple docstring'''
_A : List[str] = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
_A : List[Any] = (AutoformerForPrediction,) if is_torch_available() else ()
_A : Any = {'''feature-extraction''': AutoformerModel} if is_torch_available() else {}
_A : Dict = False
_A : Tuple = False
_A : Optional[int] = False
_A : Tuple = False
_A : str = False
_A : Union[str, Any] = False
def lowerCAmelCase ( self : Dict ) -> str:
"""simple docstring"""
__lowercase : List[str] = AutoformerModelTester(self )
__lowercase : Dict = ConfigTester(self , config_class=__a , has_text_modality=__a )
def lowerCAmelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase , __lowercase : Dict = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
__lowercase : Dict = model_class(__a )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__a )
__lowercase , __lowercase : Tuple = model_class.from_pretrained(__a , output_loading_info=__a )
self.assertEqual(info["""missing_keys"""] , [] )
def lowerCAmelCase ( self : List[str] ) -> List[str]:
"""simple docstring"""
__lowercase : str = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*__a )
@unittest.skip(reason="""Model has no tokens embeddings""" )
def lowerCAmelCase ( self : str ) -> int:
"""simple docstring"""
pass
def lowerCAmelCase ( self : Any ) -> List[str]:
"""simple docstring"""
model_signature = inspect.signature(getattr(AutoformerModel , """forward""" ) )
# The main input is the name of the argument after `self`
observed_main_input_name = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , observed_main_input_name )
def lowerCAmelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase , __lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase : Dict = model_class(__a )
__lowercase : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase : Any = [*signature.parameters.keys()]
__lowercase : int = [
"""past_values""",
"""past_time_features""",
"""past_observed_mask""",
"""static_categorical_features""",
"""static_real_features""",
"""future_values""",
"""future_time_features""",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("""future_observed_mask""" )
expected_arg_names.extend(
[
"""decoder_attention_mask""",
"""head_mask""",
"""decoder_head_mask""",
"""cross_attn_head_mask""",
"""encoder_outputs""",
"""past_key_values""",
"""output_hidden_states""",
"""output_attentions""",
"""use_cache""",
"""return_dict""",
] )
self.assertListEqual(arg_names[: len(__a )] , __a )
def lowerCAmelCase ( self : int ) -> int:
"""simple docstring"""
__lowercase , __lowercase : str = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase : int = True
__lowercase : Tuple = getattr(self.model_tester , """seq_length""" , __a )
__lowercase : Union[str, Any] = getattr(self.model_tester , """decoder_seq_length""" , __a )
__lowercase : List[str] = getattr(self.model_tester , """encoder_seq_length""" , __a )
__lowercase : List[Any] = getattr(self.model_tester , """d_model""" , __a )
__lowercase : Optional[int] = getattr(self.model_tester , """num_attention_heads""" , __a )
__lowercase : Any = d_model // num_attention_heads
for model_class in self.all_model_classes:
__lowercase : Dict = True
__lowercase : List[str] = False
__lowercase : Optional[int] = True
__lowercase : str = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
__lowercase : int = model(**self._prepare_for_class(__a , __a ) )
__lowercase : Any = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__lowercase : Optional[int] = True
__lowercase : List[str] = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
__lowercase : Union[str, Any] = model(**self._prepare_for_class(__a , __a ) )
__lowercase : Dict = outputs.encoder_attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
__lowercase : Tuple = len(__a )
__lowercase : str = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(__a , __a )
# decoder attentions
__lowercase : List[Any] = outputs.decoder_attentions
self.assertIsInstance(__a , (list, tuple) )
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
__lowercase : Optional[int] = outputs.cross_attentions
self.assertIsInstance(__a , (list, tuple) )
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
__lowercase : Tuple = True
__lowercase : Union[str, Any] = True
__lowercase : Tuple = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
__lowercase : Any = model(**self._prepare_for_class(__a , __a ) )
self.assertEqual(out_len + 2 , len(__a ) )
__lowercase : Optional[Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def lowerCAmelCase ( self : int ) -> Any:
"""simple docstring"""
super().test_retain_grad_hidden_states_attentions()
def snake_case_ ( lowerCAmelCase_ : Optional[int]="train-batch.pt" ):
file = hf_hub_download(repo_id="""hf-internal-testing/tourism-monthly-batch""" , filename=lowerCAmelCase_ , repo_type="""dataset""" )
batch = torch.load(file , map_location=torch_device )
return batch
@require_torch
@slow
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
__lowercase : List[str] = AutoformerModel.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(__a )
__lowercase : List[Any] = prepare_batch()
with torch.no_grad():
__lowercase : Tuple = model(
past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , future_values=batch["""future_values"""] , future_time_features=batch["""future_time_features"""] , )[0]
__lowercase : List[str] = torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , __a )
__lowercase : Optional[int] = torch.tensor(
[[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] , device=__a )
self.assertTrue(torch.allclose(output[0, :3, :3] , __a , atol=__a ) )
def lowerCAmelCase ( self : str ) -> str:
"""simple docstring"""
__lowercase : int = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(__a )
__lowercase : List[str] = prepare_batch("""val-batch.pt""" )
with torch.no_grad():
__lowercase : Optional[Any] = model(
past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , ).encoder_last_hidden_state
__lowercase : List[str] = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , __a )
__lowercase : Optional[int] = torch.tensor(
[[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] , device=__a )
self.assertTrue(torch.allclose(output[0, :3, :3] , __a , atol=__a ) )
def lowerCAmelCase ( self : Dict ) -> List[Any]:
"""simple docstring"""
__lowercase : Optional[Any] = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(__a )
__lowercase : Optional[int] = prepare_batch("""val-batch.pt""" )
with torch.no_grad():
__lowercase : int = model.generate(
static_categorical_features=batch["""static_categorical_features"""] , past_time_features=batch["""past_time_features"""] , past_values=batch["""past_values"""] , future_time_features=batch["""future_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , )
__lowercase : int = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , __a )
__lowercase : Optional[Any] = torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=__a )
__lowercase : Dict = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , __a , rtol=1E-1 ) )
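# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original tests): the
# `decomposition_layer` used in the encoder/decoder test above splits a
# series into a moving-average trend and a seasonal residual. A minimal
# stand-alone version, assuming an odd kernel and edge replication:
import torch

def decompose_sketch(x, kernel_size=25):
    """x: (batch, time, features) -> (seasonal, trend), both shaped like x."""
    pad = (kernel_size - 1) // 2
    front = x[:, :1, :].repeat(1, pad, 1)   # replicate the first time step
    back = x[:, -1:, :].repeat(1, pad, 1)   # replicate the last time step
    padded = torch.cat([front, x, back], dim=1)
    trend = torch.nn.functional.avg_pool1d(
        padded.permute(0, 2, 1), kernel_size=kernel_size, stride=1
    ).permute(0, 2, 1)
    return x - trend, trend                 # seasonal + trend sums back to x
# ---------------------------------------------------------------------------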
| 649
| 1
|
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
lowerCamelCase : Union[str, Any] = version.parse(importlib_metadata.version('''nltk'''))
if NLTK_VERSION >= version.Version('''3.6.4'''):
from nltk import word_tokenize
lowerCamelCase : List[str] = '''\
@inproceedings{banarjee2005,
title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},
author = {Banerjee, Satanjeev and Lavie, Alon},
booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},
month = jun,
year = {2005},
address = {Ann Arbor, Michigan},
publisher = {Association for Computational Linguistics},
url = {https://www.aclweb.org/anthology/W05-0909},
pages = {65--72},
}
'''
lowerCamelCase : Union[str, Any] = '''\
METEOR, an automatic metric for machine translation evaluation
that is based on a generalized concept of unigram matching between the
machine-produced translation and human-produced reference translations.
Unigrams can be matched based on their surface forms, stemmed forms,
and meanings; furthermore, METEOR can be easily extended to include more
advanced matching strategies. Once all generalized unigram matches
between the two strings have been found, METEOR computes a score for
this matching using a combination of unigram-precision, unigram-recall, and
a measure of fragmentation that is designed to directly capture how
well-ordered the matched words in the machine translation are in relation
to the reference.
METEOR gets an R correlation value of 0.347 with human evaluation on the Arabic
data and 0.331 on the Chinese data. This is shown to be an improvement on
using simply unigram-precision, unigram-recall and their harmonic F1
combination.
'''
lowerCamelCase : Any = '''
Computes METEOR score of translated segments against one or more references.
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
alpha: Parameter for controlling relative weights of precision and recall. default: 0.9
beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3
gamma: Relative weight assigned to fragmentation penalty. default: 0.5
Returns:
\'meteor\': meteor score.
Examples:
>>> meteor = datasets.load_metric(\'meteor\')
>>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]
>>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]
>>> results = meteor.compute(predictions=predictions, references=references)
>>> print(round(results["meteor"], 4))
0.6944
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase ( datasets.Metric ):
'''simple docstring'''
def lowerCAmelCase ( self : int ) -> List[str]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"""] , reference_urls=[
"""https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score""",
"""https://en.wikipedia.org/wiki/METEOR""",
] , )
def lowerCAmelCase ( self : Dict , __a : List[Any] ) -> int:
"""simple docstring"""
import nltk
nltk.download("""wordnet""" )
if NLTK_VERSION >= version.Version("""3.6.5""" ):
nltk.download("""punkt""" )
if NLTK_VERSION >= version.Version("""3.6.6""" ):
nltk.download("""omw-1.4""" )
def lowerCAmelCase ( self : List[Any] , predictions : Dict , references : Union[str, Any] , alpha : Union[str, Any]=0.9 , beta : Union[str, Any]=3 , gamma : Any=0.5 ) -> Optional[Any]:
"""simple docstring"""
if NLTK_VERSION >= version.Version("""3.6.5""" ):
scores = [
meteor_score.single_meteor_score(
word_tokenize(ref ) , word_tokenize(pred ) , alpha=alpha , beta=beta , gamma=gamma )
for ref, pred in zip(references , predictions )
]
else:
scores = [
meteor_score.single_meteor_score(ref , pred , alpha=alpha , beta=beta , gamma=gamma )
for ref, pred in zip(references , predictions )
]
return {"meteor": np.mean(scores )}
| 649
|
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class lowerCAmelCase ( __a , __a ):
'''simple docstring'''
_A : str = 1
@register_to_config
def __init__( self : Optional[int] , num_train_timesteps : Tuple=2000 , beta_min : List[str]=0.1 , beta_max : str=20 , sampling_eps : Optional[int]=1E-3 ) -> int:
"""simple docstring"""
self.sigmas = None
self.discrete_sigmas = None
self.timesteps = None
def lowerCAmelCase ( self : List[Any] , num_inference_steps : Any , device : Union[str, torch.device] = None ) -> str:
"""simple docstring"""
self.timesteps = torch.linspace(1 , self.config.sampling_eps , num_inference_steps , device=device )
def lowerCAmelCase ( self : Tuple , score : List[Any] , x : Tuple , t : int , generator : Optional[int]=None ) -> str:
"""simple docstring"""
if self.timesteps is None:
raise ValueError(
"""`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" )
# TODO(Patrick) better comments + non-PyTorch
# postprocess model score
log_mean_coeff = (
-0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
)
std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
std = std.flatten()
while len(std.shape ) < len(score.shape ):
std = std.unsqueeze(-1 )
score = -score / std
# compute
dt = -1.0 / len(self.timesteps )
beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
beta_t = beta_t.flatten()
while len(beta_t.shape ) < len(x.shape ):
beta_t = beta_t.unsqueeze(-1 )
drift = -0.5 * beta_t * x
diffusion = torch.sqrt(beta_t )
drift = drift - diffusion**2 * score
x_mean = x + drift * dt
# add noise
noise = randn_tensor(x.shape , layout=x.layout , generator=generator , device=x.device , dtype=x.dtype )
x = x_mean + diffusion * math.sqrt(-dt ) * noise
return x, x_mean
def __len__( self : Tuple ) -> Optional[int]:
"""simple docstring"""
return self.config.num_train_timesteps
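# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original scheduler): `step_pred`
# above is one Euler-Maruyama step of the reverse-time VP-SDE
#     dx = [-1/2 * beta(t) * x - beta(t) * score] dt + sqrt(beta(t)) dW
# integrated backwards with dt = -1 / len(timesteps). A scalar-beta
# stand-alone version, assuming the config defaults beta_min=0.1,
# beta_max=20:
import math
import torch

def reverse_vp_step_sketch(x, score, t, dt, beta_min=0.1, beta_max=20.0):
    beta_t = beta_min + t * (beta_max - beta_min)
    drift = -0.5 * beta_t * x - beta_t * score   # reverse-time drift
    x_mean = x + drift * dt                      # deterministic update (dt < 0)
    noise = torch.randn_like(x)
    return x_mean + math.sqrt(beta_t) * math.sqrt(-dt) * noise, x_mean
# ---------------------------------------------------------------------------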
| 649
| 1
|
import collections
import os
import re
from pathlib import Path
lowerCamelCase : List[str] = '''src/transformers'''
# Matches is_xxx_available()
lowerCamelCase : List[Any] = re.compile(r'''is\_([a-z_]*)_available()''')
# Catches a one-line _import_struct = {xxx}
lowerCamelCase : Any = re.compile(r'''^_import_structure\s+=\s+\{([^\}]+)\}''')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
lowerCamelCase : Union[str, Any] = re.compile(r'''\s+"\S*":\s+\[([^\]]*)\]''')
# Catches a line if not is_foo_available
lowerCamelCase : Dict = re.compile(r'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''')
# Catches a line _import_struct["bla"].append("foo")
lowerCamelCase : Optional[Any] = re.compile(r'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
lowerCamelCase : Any = re.compile(r'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''')
# Catches a line with an object between quotes and a comma: "MyModel",
lowerCamelCase : str = re.compile(r'''^\s+"([^"]+)",''')
# Catches a line with objects between brackets only: ["foo", "bar"],
lowerCamelCase : Optional[Any] = re.compile(r'''^\s+\[([^\]]+)\]''')
# Catches a line with from foo import bar, bla, boo
lowerCamelCase : Optional[int] = re.compile(r'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
# Catches a line with try:
lowerCamelCase : str = re.compile(r'''^\s*try:''')
# Catches a line with else:
lowerCamelCase : str = re.compile(r'''^\s*else:''')
def snake_case_ ( lowerCAmelCase_ : Union[str, Any] ):
if _re_test_backend.search(lowerCAmelCase_ ) is None:
return None
__lowercase : str = [b[0] for b in _re_backend.findall(lowerCAmelCase_ )]
backends.sort()
return "_and_".join(lowerCAmelCase_ )
def snake_case_ ( lowerCAmelCase_ : Tuple ):
with open(lowerCAmelCase_ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
__lowercase : List[Any] = f.readlines()
__lowercase : Optional[Any] = 0
while line_index < len(lowerCAmelCase_ ) and not lines[line_index].startswith("""_import_structure = {""" ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(lowerCAmelCase_ ):
return None
# First grab the objects without a specific backend in _import_structure
__lowercase : Dict = []
while not lines[line_index].startswith("""if TYPE_CHECKING""" ) and find_backend(lines[line_index] ) is None:
__lowercase : Tuple = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(lowerCAmelCase_ ):
__lowercase : Any = _re_one_line_import_struct.search(lowerCAmelCase_ ).groups()[0]
__lowercase : int = re.findall(r"""\[([^\]]+)\]""" , lowerCAmelCase_ )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(""", """ )] )
line_index += 1
continue
__lowercase : List[str] = _re_import_struct_key_value.search(lowerCAmelCase_ )
if single_line_import_search is not None:
__lowercase : int = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(""", """ ) if len(lowerCAmelCase_ ) > 0]
objects.extend(lowerCAmelCase_ )
elif line.startswith(""" """ * 8 + """\"""" ):
objects.append(line[9:-3] )
line_index += 1
__lowercase : Tuple = {"""none""": objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith("""if TYPE_CHECKING""" ):
# If the line is an if not is_backend_available, we grab all objects associated.
__lowercase : Optional[Any] = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
__lowercase : Dict = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
__lowercase : Dict = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 4 ):
__lowercase : Union[str, Any] = lines[line_index]
if _re_import_struct_add_one.search(lowerCAmelCase_ ) is not None:
objects.append(_re_import_struct_add_one.search(lowerCAmelCase_ ).groups()[0] )
elif _re_import_struct_add_many.search(lowerCAmelCase_ ) is not None:
__lowercase : Optional[Any] = _re_import_struct_add_many.search(lowerCAmelCase_ ).groups()[0].split(""", """ )
__lowercase : Optional[int] = [obj[1:-1] for obj in imports if len(lowerCAmelCase_ ) > 0]
objects.extend(lowerCAmelCase_ )
elif _re_between_brackets.search(lowerCAmelCase_ ) is not None:
__lowercase : Optional[int] = _re_between_brackets.search(lowerCAmelCase_ ).groups()[0].split(""", """ )
__lowercase : int = [obj[1:-1] for obj in imports if len(lowerCAmelCase_ ) > 0]
objects.extend(lowerCAmelCase_ )
elif _re_quote_object.search(lowerCAmelCase_ ) is not None:
objects.append(_re_quote_object.search(lowerCAmelCase_ ).groups()[0] )
elif line.startswith(""" """ * 8 + """\"""" ):
objects.append(line[9:-3] )
elif line.startswith(""" """ * 12 + """\"""" ):
objects.append(line[13:-3] )
line_index += 1
__lowercase : List[str] = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
__lowercase : List[str] = []
while (
line_index < len(lowerCAmelCase_ )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith("""else""" )
):
__lowercase : Any = lines[line_index]
__lowercase : Optional[Any] = _re_import.search(lowerCAmelCase_ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
elif line.startswith(""" """ * 8 ):
objects.append(line[8:-2] )
line_index += 1
__lowercase : Optional[int] = {"""none""": objects}
# Let's continue with backend-specific objects
while line_index < len(lowerCAmelCase_ ):
# If the line is an if is_backend_available, we grab all objects associated.
__lowercase : int = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
__lowercase : List[Any] = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
__lowercase : str = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 8 ):
__lowercase : List[Any] = lines[line_index]
__lowercase : List[Any] = _re_import.search(lowerCAmelCase_ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
elif line.startswith(""" """ * 12 ):
objects.append(line[12:-2] )
line_index += 1
__lowercase : List[str] = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def snake_case_ ( lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[Any] ):
def find_duplicates(lowerCAmelCase_ : Tuple ):
return [k for k, v in collections.Counter(lowerCAmelCase_ ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
__lowercase : List[Any] = []
for key in import_dict_objects.keys():
__lowercase : Optional[Any] = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F"Duplicate _import_structure definitions for: {duplicate_imports}" )
__lowercase : int = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}" )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
__lowercase : str = """base imports""" if key == """none""" else F"{key} backend"
errors.append(F"Differences for {name}:" )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F" {a} in TYPE_HINT but not in _import_structure." )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F" {a} in _import_structure but not in TYPE_HINT." )
return errors
def snake_case_ ( ):
__lowercase : List[str] = []
for root, _, files in os.walk(lowerCAmelCase_ ):
if "__init__.py" in files:
__lowercase : Any = os.path.join(lowerCAmelCase_ , """__init__.py""" )
__lowercase : Optional[int] = parse_init(lowerCAmelCase_ )
if objects is not None:
__lowercase : List[str] = analyze_results(*lowerCAmelCase_ )
if len(lowerCAmelCase_ ) > 0:
__lowercase : Optional[Any] = F"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
failures.append("""\n""".join(lowerCAmelCase_ ) )
if len(lowerCAmelCase_ ) > 0:
raise ValueError("""\n\n""".join(lowerCAmelCase_ ) )
def snake_case_ ( ):
__lowercase : Tuple = []
for path, directories, files in os.walk(lowerCAmelCase_ ):
for folder in directories:
# Ignore private modules
if folder.startswith("""_""" ):
directories.remove(lowerCAmelCase_ )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(lowerCAmelCase_ ) / folder).glob("""*.py""" ) ) ) == 0:
continue
__lowercase : Optional[Any] = str((Path(lowerCAmelCase_ ) / folder).relative_to(lowerCAmelCase_ ) )
__lowercase : Optional[Any] = short_path.replace(os.path.sep , """.""" )
submodules.append(lowerCAmelCase_ )
for fname in files:
if fname == "__init__.py":
continue
__lowercase : Optional[int] = str((Path(lowerCAmelCase_ ) / fname).relative_to(lowerCAmelCase_ ) )
__lowercase : List[str] = short_path.replace(""".py""" , """""" ).replace(os.path.sep , """.""" )
if len(submodule.split(""".""" ) ) == 1:
submodules.append(lowerCAmelCase_ )
return submodules
lowerCamelCase : Optional[int] = [
'''convert_pytorch_checkpoint_to_tf2''',
'''modeling_flax_pytorch_utils''',
'''models.esm.openfold_utils''',
]
def snake_case_ ( ):
# This is to make sure the transformers module imported is the one in the repo.
from transformers.utils import direct_transformers_import
__lowercase : List[str] = direct_transformers_import(lowerCAmelCase_ )
__lowercase : Tuple = set(transformers._import_structure.keys() )
# This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
# some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
# (potentially re-) add them.
with open(os.path.join(lowerCAmelCase_ , """__init__.py""" ) , """r""" ) as f:
__lowercase : List[str] = f.read()
import_structure_keys.update(set(re.findall(r"""import_structure\[\"([^\"]*)\"\]""" , lowerCAmelCase_ ) ) )
__lowercase : List[Any] = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in import_structure_keys
]
if len(lowerCAmelCase_ ) > 0:
__lowercase : List[Any] = """\n""".join(F"- {module}" for module in module_not_registered )
raise ValueError(
"""The following submodules are not properly registed in the main init of Transformers:\n"""
F"{list_of_modules}\n"
"""Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.""" )
if __name__ == "__main__":
check_all_inits()
check_submodules()
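# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the checker above) of the `__init__.py`
# layout this script validates: `_import_structure` and the TYPE_CHECKING
# block must declare exactly the same objects per backend. The `Foo*` names
# below are placeholders.
#
#   _import_structure = {"configuration_foo": ["FooConfig"]}
#   try:
#       if not is_torch_available():
#           raise OptionalDependencyNotAvailable()
#   except OptionalDependencyNotAvailable:
#       pass
#   else:
#       _import_structure["modeling_foo"] = ["FooModel"]
#
#   if TYPE_CHECKING:
#       from .configuration_foo import FooConfig
#       try:
#           if not is_torch_available():
#               raise OptionalDependencyNotAvailable()
#       except OptionalDependencyNotAvailable:
#           pass
#       else:
#           from .modeling_foo import FooModel
# ---------------------------------------------------------------------------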
| 649
|
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase ( __a , unittest.TestCase ):
'''simple docstring'''
_A : str = LongformerTokenizer
_A : int = True
_A : Optional[int] = LongformerTokenizerFast
_A : int = True
def lowerCAmelCase ( self : Any ) -> List[str]:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__lowercase : Union[str, Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
__lowercase : Union[str, Any] = dict(zip(__a , range(len(__a ) ) ) )
__lowercase : Any = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
__lowercase : Optional[int] = {"""unk_token""": """<unk>"""}
__lowercase : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__lowercase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__a ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(__a ) )
def lowerCAmelCase ( self : Optional[int] , **__a : Optional[Any] ) -> str:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__a )
def lowerCAmelCase ( self : Tuple , **__a : Tuple ) -> str:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__a )
def lowerCAmelCase ( self : str , __a : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = """lower newer"""
__lowercase : int = """lower newer"""
return input_text, output_text
def lowerCAmelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase : Union[str, Any] = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
__lowercase : Dict = """lower newer"""
__lowercase : Optional[Any] = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
__lowercase : str = tokenizer.tokenize(__a ) # , add_prefix_space=True)
self.assertListEqual(__a , __a )
__lowercase : int = tokens + [tokenizer.unk_token]
__lowercase : str = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , __a )
def lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : Optional[Any] = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("""Hello world!""" , add_special_tokens=__a ) , [0, 31414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode("""Hello world! cécé herlolip 418""" , add_special_tokens=__a ) , [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] , )
@slow
def lowerCAmelCase ( self : Tuple ) -> str:
"""simple docstring"""
__lowercase : Any = self.tokenizer_class.from_pretrained("""allenai/longformer-base-4096""" )
__lowercase : Optional[Any] = tokenizer.encode("""sequence builders""" , add_special_tokens=__a )
__lowercase : List[str] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=__a )
__lowercase : Optional[Any] = tokenizer.encode(
"""sequence builders""" , add_special_tokens=__a , add_prefix_space=__a )
__lowercase : Union[str, Any] = tokenizer.encode(
"""sequence builders""" , """multi-sequence build""" , add_special_tokens=__a , add_prefix_space=__a )
__lowercase : List[Any] = tokenizer.build_inputs_with_special_tokens(__a )
__lowercase : Any = tokenizer.build_inputs_with_special_tokens(__a , __a )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def lowerCAmelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
__lowercase : Optional[Any] = self.get_tokenizer()
__lowercase : Tuple = """Encode this sequence."""
__lowercase : Optional[Any] = tokenizer.byte_encoder[""" """.encode("""utf-8""" )[0]]
# Testing encoder arguments
__lowercase : Dict = tokenizer.encode(__a , add_special_tokens=__a , add_prefix_space=__a )
__lowercase : Tuple = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__a , __a )
__lowercase : List[str] = tokenizer.encode(__a , add_special_tokens=__a , add_prefix_space=__a )
__lowercase : Any = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__a , __a )
tokenizer.add_special_tokens({"""bos_token""": """<s>"""} )
__lowercase : str = tokenizer.encode(__a , add_special_tokens=__a )
__lowercase : Dict = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__a , __a )
# Testing spaces after special tokens
__lowercase : List[Any] = """<mask>"""
tokenizer.add_special_tokens(
{"""mask_token""": AddedToken(__a , lstrip=__a , rstrip=__a )} ) # mask token has a left space
__lowercase : Dict = tokenizer.convert_tokens_to_ids(__a )
__lowercase : List[str] = """Encode <mask> sequence"""
__lowercase : List[str] = """Encode <mask>sequence"""
__lowercase : Union[str, Any] = tokenizer.encode(__a )
__lowercase : Dict = encoded.index(__a )
__lowercase : List[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__a , __a )
__lowercase : int = tokenizer.encode(__a )
__lowercase : Union[str, Any] = encoded.index(__a )
__lowercase : List[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__a , __a )
def lowerCAmelCase ( self : int ) -> str:
"""simple docstring"""
pass
def lowerCAmelCase ( self : int ) -> Dict:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
__lowercase : List[str] = self.rust_tokenizer_class.from_pretrained(__a , **__a )
__lowercase : List[Any] = self.tokenizer_class.from_pretrained(__a , **__a )
__lowercase : Optional[Any] = """A, <mask> AllenNLP sentence."""
__lowercase : Union[str, Any] = tokenizer_r.encode_plus(__a , add_special_tokens=__a , return_token_type_ids=__a )
__lowercase : Optional[Any] = tokenizer_p.encode_plus(__a , add_special_tokens=__a , return_token_type_ids=__a )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
__lowercase : Dict = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
__lowercase : str = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
# Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
__a , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
__a , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
def lowerCAmelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
__lowercase : Dict = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : Optional[int] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
__lowercase : Any = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] , __a )
self.assertEqual(post_processor_state["""add_prefix_space"""] , __a )
self.assertEqual(post_processor_state["""trim_offsets"""] , __a )
def lowerCAmelCase ( self : int ) -> Tuple:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
__lowercase : List[str] = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
__lowercase : int = F"{text_of_1_token} {text_of_1_token}"
__lowercase : List[str] = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : Any = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__a ) + 1, len(__a ) + 1 + len(__a )) , )
__lowercase : str = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : Tuple = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__a ) + 1, len(__a ) + 1 + len(__a )) , )
__lowercase : Optional[int] = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : str = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__a ), len(__a ) + 1 + len(__a )) , )
__lowercase : str = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : int = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__a ), len(__a ) + 1 + len(__a )) , )
__lowercase : Any = F" {text}"
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
__lowercase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : str = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__a ) + 1, 1 + len(__a ) + 1 + len(__a )) , )
__lowercase : int = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : Dict = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__a ), 1 + len(__a ) + 1 + len(__a )) , )
__lowercase : int = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : Tuple = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__a ), 1 + len(__a ) + 1 + len(__a )) , )
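# ---------------------------------------------------------------------------
# Illustrative summary (not part of the original tests) of the spans
# asserted above for text_of_1_token = "hello" (length 5). Byte-level BPE
# folds the separating space into the second token; `trim_offsets` decides
# whether that space is dropped from the token's character span:
#
#   trim_offsets enabled  -> second token spans (6, 11), space excluded
#   trim_offsets disabled -> second token spans (5, 11), space included
#
# and prefixing the text with a space shifts every span right by one.
# ---------------------------------------------------------------------------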
| 649
| 1
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
lowerCamelCase : Tuple = logging.get_logger(__name__)
lowerCamelCase : Optional[int] = {
'''Intel/dpt-large''': '''https://huggingface.co/Intel/dpt-large/resolve/main/config.json''',
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class lowerCAmelCase ( __a ):
'''simple docstring'''
_A : int = '''dpt'''
def __init__( self : List[Any] , __a : Any=768 , __a : List[Any]=12 , __a : Union[str, Any]=12 , __a : Tuple=3072 , __a : Dict="gelu" , __a : Tuple=0.0 , __a : Dict=0.0 , __a : str=0.02 , __a : Dict=1E-12 , __a : str=384 , __a : Union[str, Any]=16 , __a : int=3 , __a : str=False , __a : List[Any]=True , __a : str=[2, 5, 8, 11] , __a : Union[str, Any]="project" , __a : Dict=[4, 2, 1, 0.5] , __a : Optional[Any]=[96, 192, 384, 768] , __a : Union[str, Any]=256 , __a : Optional[Any]=-1 , __a : List[str]=False , __a : Optional[int]=True , __a : Optional[Any]=0.4 , __a : List[Any]=255 , __a : Dict=0.1 , __a : Dict=[1, 1024, 24, 24] , __a : str=[0, 1] , __a : str=None , **__a : Dict , ) -> str:
"""simple docstring"""
super().__init__(**__a )
__lowercase : Tuple = hidden_size
__lowercase : Any = is_hybrid
if self.is_hybrid:
if backbone_config is None:
logger.info("""Initializing the config with a `BiT` backbone.""" )
__lowercase : List[Any] = {
"""global_padding""": """same""",
"""layer_type""": """bottleneck""",
"""depths""": [3, 4, 9],
"""out_features""": ["""stage1""", """stage2""", """stage3"""],
"""embedding_dynamic_padding""": True,
}
__lowercase : Dict = BitConfig(**__a )
elif isinstance(__a , __a ):
logger.info("""Initializing the config with a `BiT` backbone.""" )
__lowercase : str = BitConfig(**__a )
elif isinstance(__a , __a ):
__lowercase : Optional[int] = backbone_config
else:
raise ValueError(
F"backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}." )
__lowercase : Optional[int] = backbone_featmap_shape
__lowercase : Optional[int] = neck_ignore_stages
if readout_type != "project":
raise ValueError("""Readout type must be 'project' when using `DPT-hybrid` mode.""" )
else:
__lowercase : Optional[Any] = None
__lowercase : List[Any] = None
__lowercase : Dict = []
__lowercase : Dict = num_hidden_layers
__lowercase : Dict = num_attention_heads
__lowercase : List[Any] = intermediate_size
__lowercase : Tuple = hidden_act
__lowercase : Optional[Any] = hidden_dropout_prob
__lowercase : List[Any] = attention_probs_dropout_prob
__lowercase : Optional[Any] = initializer_range
__lowercase : int = layer_norm_eps
__lowercase : Tuple = image_size
__lowercase : Dict = patch_size
__lowercase : Tuple = num_channels
__lowercase : Dict = qkv_bias
__lowercase : Any = backbone_out_indices
if readout_type not in ["ignore", "add", "project"]:
raise ValueError("""Readout_type must be one of ['ignore', 'add', 'project']""" )
__lowercase : Dict = readout_type
__lowercase : str = reassemble_factors
__lowercase : str = neck_hidden_sizes
__lowercase : Union[str, Any] = fusion_hidden_size
__lowercase : Tuple = head_in_index
__lowercase : Dict = use_batch_norm_in_fusion_residual
# auxiliary head attributes (semantic segmentation)
__lowercase : Union[str, Any] = use_auxiliary_head
__lowercase : Union[str, Any] = auxiliary_loss_weight
__lowercase : Dict = semantic_loss_ignore_index
__lowercase : List[Any] = semantic_classifier_dropout
def lowerCAmelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
__lowercase : Optional[int] = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
__lowercase : List[str] = self.backbone_config.to_dict()
__lowercase : str = self.__class__.model_type
return output
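# Illustrative usage sketch (an addition, not part of the original file).
# Assumption: the class above mirrors transformers' public DPTConfig, so that
# name is used below instead of the obfuscated class name.
def _demo_dpt_hybrid_config():
    from transformers import DPTConfig

    cfg = DPTConfig(is_hybrid=True)  # hybrid mode builds a default BiT backbone config
    cfg_dict = cfg.to_dict()  # the to_dict override above flattens the nested backbone config
    assert isinstance(cfg_dict["backbone_config"], dict)
    assert cfg_dict["model_type"] == "dpt"
    return cfg_dict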
| 649
|
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : Union[str, Any] , __a : Dict , __a : Union[str, Any]=13 , __a : Dict=7 , __a : Dict=True , __a : Dict=True , __a : Any=True , __a : List[str]=True , __a : int=99 , __a : Optional[int]=32 , __a : str=2 , __a : int=4 , __a : List[str]=37 , __a : Union[str, Any]="gelu" , __a : Union[str, Any]=0.1 , __a : Union[str, Any]=0.1 , __a : List[Any]=512 , __a : int=16 , __a : Union[str, Any]=2 , __a : Union[str, Any]=0.02 , __a : List[str]=3 , __a : Dict=4 , __a : Optional[Any]=None , ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Any = parent
__lowercase : Tuple = 13
__lowercase : Dict = 7
__lowercase : List[Any] = True
__lowercase : Tuple = True
__lowercase : List[str] = True
__lowercase : Any = True
__lowercase : Optional[int] = 99
__lowercase : str = 384
__lowercase : Optional[Any] = 2
__lowercase : Dict = 4
__lowercase : str = 37
__lowercase : Optional[int] = """gelu"""
__lowercase : int = 0.1
__lowercase : Union[str, Any] = 0.1
__lowercase : Tuple = 512
__lowercase : Tuple = 16
__lowercase : Optional[int] = 2
__lowercase : Optional[Any] = 0.02
__lowercase : Dict = 3
__lowercase : Union[str, Any] = 4
__lowercase : Tuple = 128
__lowercase : Optional[Any] = 2
__lowercase : int = 9
__lowercase : List[Any] = 1
__lowercase : Union[str, Any] = None
def lowerCAmelCase ( self : str ) -> List[Any]:
"""simple docstring"""
__lowercase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase : Optional[Any] = None
if self.use_input_mask:
__lowercase : Dict = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase : Dict = None
if self.use_token_type_ids:
__lowercase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowercase : Optional[Any] = None
__lowercase : str = None
__lowercase : Tuple = None
if self.use_labels:
__lowercase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase : str = ids_tensor([self.batch_size] , self.num_choices )
__lowercase : Optional[int] = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__a , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase ( self : Dict , __a : List[Any] , __a : List[str] , __a : Union[str, Any] , __a : str , __a : Union[str, Any] , __a : Tuple , __a : Tuple ) -> Dict:
"""simple docstring"""
__lowercase : Dict = TFConvBertModel(config=__a )
__lowercase : Tuple = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
__lowercase : Any = [input_ids, input_mask]
__lowercase : Dict = model(__a )
__lowercase : str = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase ( self : Tuple , __a : Union[str, Any] , __a : Any , __a : Tuple , __a : Union[str, Any] , __a : str , __a : Dict , __a : str ) -> Dict:
"""simple docstring"""
__lowercase : Optional[int] = TFConvBertForMaskedLM(config=__a )
__lowercase : List[Any] = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
__lowercase : Any = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase ( self : Optional[int] , __a : int , __a : Any , __a : Optional[int] , __a : int , __a : int , __a : List[Any] , __a : Optional[int] ) -> List[Any]:
"""simple docstring"""
__lowercase : str = self.num_labels
__lowercase : List[Any] = TFConvBertForSequenceClassification(config=__a )
__lowercase : int = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
__lowercase : List[str] = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase ( self : Optional[int] , __a : Any , __a : Optional[Any] , __a : int , __a : Optional[int] , __a : Tuple , __a : int , __a : int ) -> Dict:
"""simple docstring"""
__lowercase : Tuple = self.num_choices
__lowercase : Dict = TFConvBertForMultipleChoice(config=__a )
__lowercase : List[str] = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
__lowercase : int = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
__lowercase : str = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
__lowercase : str = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
__lowercase : Dict = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCAmelCase ( self : List[str] , __a : str , __a : List[str] , __a : List[str] , __a : List[str] , __a : Any , __a : Tuple , __a : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase : Tuple = self.num_labels
__lowercase : Tuple = TFConvBertForTokenClassification(config=__a )
__lowercase : Dict = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
__lowercase : str = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase ( self : List[Any] , __a : Optional[int] , __a : List[str] , __a : Optional[Any] , __a : int , __a : Tuple , __a : Any , __a : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : Any = TFConvBertForQuestionAnswering(config=__a )
__lowercase : str = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
__lowercase : List[Any] = model(__a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase ( self : Tuple ) -> Tuple:
"""simple docstring"""
__lowercase : Tuple = self.prepare_config_and_inputs()
        __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase : int = config_and_inputs
__lowercase : Union[str, Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class lowerCAmelCase ( __a , __a , unittest.TestCase ):
'''simple docstring'''
_A : Dict = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
_A : str = (
{
'''feature-extraction''': TFConvBertModel,
'''fill-mask''': TFConvBertForMaskedLM,
'''question-answering''': TFConvBertForQuestionAnswering,
'''text-classification''': TFConvBertForSequenceClassification,
'''token-classification''': TFConvBertForTokenClassification,
'''zero-shot''': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
_A : Union[str, Any] = False
_A : List[str] = False
_A : Dict = False
def lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
__lowercase : int = TFConvBertModelTester(self )
__lowercase : Tuple = ConfigTester(self , config_class=__a , hidden_size=37 )
def lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def lowerCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__a )
def lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__a )
def lowerCAmelCase ( self : str ) -> Any:
"""simple docstring"""
__lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__a )
def lowerCAmelCase ( self : str ) -> str:
"""simple docstring"""
__lowercase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__a )
def lowerCAmelCase ( self : str ) -> Any:
"""simple docstring"""
__lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__a )
@slow
def lowerCAmelCase ( self : str ) -> Any:
"""simple docstring"""
__lowercase , __lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase : Union[str, Any] = True
__lowercase : List[Any] = True
if hasattr(__a , """use_cache""" ):
__lowercase : Optional[Any] = True
__lowercase : List[str] = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
__lowercase : int = getattr(self.model_tester , """key_length""" , __a )
for model_class in self.all_model_classes:
__lowercase : Optional[Any] = self._prepare_for_class(__a , __a )
__lowercase : Tuple = model_class(__a )
__lowercase : Tuple = len(model(__a ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__a , saved_model=__a )
__lowercase : List[Any] = os.path.join(__a , """saved_model""" , """1""" )
__lowercase : str = tf.keras.models.load_model(__a )
__lowercase : Optional[int] = model(__a )
if self.is_encoder_decoder:
__lowercase : Union[str, Any] = outputs["""encoder_hidden_states"""]
__lowercase : Union[str, Any] = outputs["""encoder_attentions"""]
else:
__lowercase : Union[str, Any] = outputs["""hidden_states"""]
__lowercase : List[str] = outputs["""attentions"""]
self.assertEqual(len(__a ) , __a )
__lowercase : List[Any] = getattr(
self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(__a ) , __a )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : str = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" )
self.assertIsNotNone(__a )
def lowerCAmelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
__lowercase , __lowercase : int = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase : List[str] = True
__lowercase : List[Any] = getattr(self.model_tester , """decoder_seq_length""" , self.model_tester.seq_length )
__lowercase : Optional[int] = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
__lowercase : List[str] = getattr(self.model_tester , """key_length""" , __a )
__lowercase : List[Any] = getattr(self.model_tester , """key_length""" , __a )
def check_decoder_attentions_output(__a : List[str] ):
__lowercase : Union[str, Any] = len(__a )
self.assertEqual(out_len % 2 , 0 )
__lowercase : Any = outputs.decoder_attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(__a : str ):
__lowercase : str = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
__lowercase : int = True
__lowercase : Any = False
__lowercase : List[Any] = model_class(__a )
__lowercase : Tuple = model(self._prepare_for_class(__a , __a ) )
__lowercase : Dict = len(__a )
self.assertEqual(config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
if self.is_encoder_decoder:
__lowercase : Any = model_class(__a )
__lowercase : List[str] = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(config.output_hidden_states , __a )
check_decoder_attentions_output(__a )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
__lowercase : Dict = True
__lowercase : Optional[Any] = model_class(__a )
__lowercase : Optional[int] = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
# Check attention is always last and order is fine
__lowercase : List[str] = True
__lowercase : List[Any] = True
__lowercase : Any = model_class(__a )
__lowercase : Optional[int] = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__a ) )
self.assertEqual(model.config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
@require_tf
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : List[str] = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" )
__lowercase : str = tf.constant([[0, 1, 2, 3, 4, 5]] )
__lowercase : Tuple = model(__a )[0]
__lowercase : Any = [1, 6, 768]
self.assertEqual(output.shape , __a )
__lowercase : Optional[Any] = tf.constant(
[
[
[-0.03475493, -0.4686034, -0.30638832],
[0.22637248, -0.26988646, -0.7423424],
[0.10324868, -0.45013508, -0.58280784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __a , atol=1E-4 )
| 649
| 1
|
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
lowerCamelCase : Optional[List[str]] = None
lowerCamelCase : List[str] = '''<''' if sys.byteorder == '''little''' else '''>'''
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1", whose values are not preserved correctly when saving and loading an image
lowerCamelCase : List[str] = [
np.dtype('''|b1'''),
np.dtype('''|u1'''),
np.dtype('''<u2'''),
np.dtype('''>u2'''),
np.dtype('''<i2'''),
np.dtype('''>i2'''),
np.dtype('''<u4'''),
np.dtype('''>u4'''),
np.dtype('''<i4'''),
np.dtype('''>i4'''),
np.dtype('''<f4'''),
np.dtype('''>f4'''),
np.dtype('''<f8'''),
np.dtype('''>f8'''),
]
@dataclass
class lowerCAmelCase :
'''simple docstring'''
_A : bool = True
_A : Optional[str] = None
# Automatically constructed
_A : ClassVar[str] = "PIL.Image.Image"
_A : ClassVar[Any] = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()} )
_A : str = field(default='''Image''' , init=__a , repr=__a )
def __call__( self : Tuple ) -> Tuple:
"""simple docstring"""
return self.pa_type
def lowerCAmelCase ( self : Optional[Any] , __a : Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"] ) -> dict:
"""simple docstring"""
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
if isinstance(__a , __a ):
__lowercase : str = np.array(__a )
if isinstance(__a , __a ):
return {"path": value, "bytes": None}
elif isinstance(__a , __a ):
return {"path": None, "bytes": value}
elif isinstance(__a , np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(__a )
elif isinstance(__a , PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(__a )
elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get("""path""" )}
elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )}
else:
raise ValueError(
F"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}." )
def lowerCAmelCase ( self : List[str] , __a : dict , __a : Union[str, Any]=None ) -> "PIL.Image.Image":
"""simple docstring"""
if not self.decode:
raise RuntimeError("""Decoding is disabled for this feature. Please use Image(decode=True) instead.""" )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support decoding images, please install 'Pillow'.""" )
if token_per_repo_id is None:
__lowercase : Union[str, Any] = {}
__lowercase , __lowercase : List[Any] = value["""path"""], value["""bytes"""]
if bytes_ is None:
if path is None:
raise ValueError(F"An image should have one of 'path' or 'bytes' but both are None in {value}." )
else:
if is_local_path(__a ):
__lowercase : str = PIL.Image.open(__a )
else:
__lowercase : Dict = path.split("""::""" )[-1]
try:
__lowercase : List[str] = string_to_dict(__a , config.HUB_DATASETS_URL )["""repo_id"""]
__lowercase : int = token_per_repo_id.get(__a )
except ValueError:
__lowercase : List[Any] = None
with xopen(__a , """rb""" , use_auth_token=__a ) as f:
__lowercase : Dict = BytesIO(f.read() )
__lowercase : int = PIL.Image.open(bytes_ )
else:
__lowercase : Optional[Any] = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def lowerCAmelCase ( self : Union[str, Any] ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
"""simple docstring"""
from .features import Value
return (
self
if self.decode
else {
"bytes": Value("""binary""" ),
"path": Value("""string""" ),
}
)
def lowerCAmelCase ( self : List[str] , __a : Union[pa.StringArray, pa.StructArray, pa.ListArray] ) -> pa.StructArray:
"""simple docstring"""
if pa.types.is_string(storage.type ):
__lowercase : Optional[int] = pa.array([None] * len(__a ) , type=pa.binary() )
__lowercase : Optional[Any] = pa.StructArray.from_arrays([bytes_array, storage] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
__lowercase : Dict = pa.array([None] * len(__a ) , type=pa.string() )
__lowercase : Optional[int] = pa.StructArray.from_arrays([storage, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("""bytes""" ) >= 0:
__lowercase : List[Any] = storage.field("""bytes""" )
else:
__lowercase : Optional[int] = pa.array([None] * len(__a ) , type=pa.binary() )
if storage.type.get_field_index("""path""" ) >= 0:
__lowercase : Any = storage.field("""path""" )
else:
__lowercase : Optional[Any] = pa.array([None] * len(__a ) , type=pa.string() )
__lowercase : Optional[int] = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
__lowercase : Dict = pa.array(
[encode_np_array(np.array(__a ) )["""bytes"""] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , )
__lowercase : Dict = pa.array([None] * len(__a ) , type=pa.string() )
__lowercase : Optional[Any] = pa.StructArray.from_arrays(
[bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
return array_cast(__a , self.pa_type )
def lowerCAmelCase ( self : Union[str, Any] , __a : pa.StructArray ) -> pa.StructArray:
"""simple docstring"""
@no_op_if_value_is_null
def path_to_bytes(__a : Any ):
with xopen(__a , """rb""" ) as f:
__lowercase : List[Any] = f.read()
return bytes_
__lowercase : Dict = pa.array(
[
(path_to_bytes(x["""path"""] ) if x["""bytes"""] is None else x["""bytes"""]) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
__lowercase : Dict = pa.array(
[os.path.basename(__a ) if path is not None else None for path in storage.field("""path""" ).to_pylist()] , type=pa.string() , )
__lowercase : Optional[int] = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
return array_cast(__a , self.pa_type )
def snake_case_ ( ):
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
__lowercase : List[str] = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
return _IMAGE_COMPRESSION_FORMATS
def snake_case_ ( lowerCAmelCase_ : "PIL.Image.Image" ):
__lowercase : List[Any] = BytesIO()
if image.format in list_image_compression_formats():
__lowercase : Any = image.format
else:
__lowercase : Any = """PNG""" if image.mode in ["""1""", """L""", """LA""", """RGB""", """RGBA"""] else """TIFF"""
image.save(lowerCAmelCase_ , format=lowerCAmelCase_ )
return buffer.getvalue()
def snake_case_ ( lowerCAmelCase_ : "PIL.Image.Image" ):
if hasattr(lowerCAmelCase_ , """filename""" ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(lowerCAmelCase_ )}
def snake_case_ ( lowerCAmelCase_ : np.ndarray ):
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
__lowercase : List[str] = array.dtype
__lowercase : Optional[Any] = dtype.byteorder if dtype.byteorder != """=""" else _NATIVE_BYTEORDER
__lowercase : int = dtype.kind
__lowercase : str = dtype.itemsize
__lowercase : List[Any] = None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
__lowercase : Union[str, Any] = np.dtype("""|u1""" )
if dtype_kind not in ["u", "i"]:
raise TypeError(
F"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays." )
if dtype is not dest_dtype:
warnings.warn(F"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'" )
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
__lowercase : int = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
__lowercase : Dict = dtype_byteorder + dtype_kind + str(lowerCAmelCase_ )
__lowercase : Optional[int] = np.dtype(lowerCAmelCase_ )
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(F"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'" )
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
F"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}" )
__lowercase : Any = PIL.Image.fromarray(array.astype(lowerCAmelCase_ ) )
return {"path": None, "bytes": image_to_bytes(lowerCAmelCase_ )}
def snake_case_ ( lowerCAmelCase_ : Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]] ):
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
if objs:
__lowercase , __lowercase : List[str] = first_non_null_value(lowerCAmelCase_ )
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(lowerCAmelCase_ , np.ndarray ):
__lowercase : int = no_op_if_value_is_null(lowerCAmelCase_ )
return [obj_to_image_dict_func(lowerCAmelCase_ ) for obj in objs]
elif isinstance(lowerCAmelCase_ , PIL.Image.Image ):
__lowercase : str = no_op_if_value_is_null(lowerCAmelCase_ )
return [obj_to_image_dict_func(lowerCAmelCase_ ) for obj in objs]
else:
return objs
else:
return objs
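# Illustrative round-trip sketch (an addition). Assumption: the feature class
# above matches datasets' public `Image` feature API
# (encode_example / decode_example), so that name is used below.
def _demo_image_feature_roundtrip():
    from datasets import Image

    feat = Image()
    encoded = feat.encode_example(np.zeros((4, 4, 3), dtype="|u1"))  # ndarray -> PNG bytes
    assert encoded["path"] is None and isinstance(encoded["bytes"], bytes)
    decoded = feat.decode_example(encoded)  # a 4x4 RGB PIL.Image.Image
    assert decoded.size == (4, 4)
    return decoded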
| 649
|
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
class lowerCAmelCase ( __a ):
'''simple docstring'''
def __init__( self : int , *__a : Dict , **__a : Optional[Any] ) -> None:
"""simple docstring"""
warnings.warn(
"""The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use BeitImageProcessor instead.""" , __a , )
super().__init__(*__a , **__a )
| 649
| 1
|
from importlib import import_module
from .logging import get_logger
lowerCamelCase : int = get_logger(__name__)
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : int , __a : Any , __a : Optional[int]=None ) -> List[str]:
"""simple docstring"""
__lowercase : str = attrs or []
if module is not None:
for key in module.__dict__:
if key in attrs or not key.startswith("""__""" ):
setattr(self , __a , getattr(__a , __a ) )
__lowercase : str = module._original_module if isinstance(__a , _PatchedModuleObj ) else module
class lowerCAmelCase :
'''simple docstring'''
_A : Tuple = []
def __init__( self : List[Any] , __a : Optional[int] , __a : str , __a : Any , __a : Dict=None ) -> List[Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = obj
__lowercase : Tuple = target
__lowercase : Tuple = new
__lowercase : List[str] = target.split(""".""" )[0]
__lowercase : Tuple = {}
__lowercase : Union[str, Any] = attrs or []
def __enter__( self : Any ) -> Tuple:
"""simple docstring"""
*__lowercase , __lowercase : str = self.target.split(""".""" )
# Patch modules:
# it's used to patch attributes of submodules like "os.path.join";
# in this case we need to patch "os" and "os.path"
for i in range(len(__a ) ):
try:
__lowercase : Optional[Any] = import_module(""".""".join(submodules[: i + 1] ) )
except ModuleNotFoundError:
continue
# We iterate over all the globals in self.obj in case we find "os" or "os.path"
for attr in self.obj.__dir__():
__lowercase : Optional[int] = getattr(self.obj , __a )
# We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
# This allows to patch renamed modules like "from os import path as ospath".
if obj_attr is submodule or (
(isinstance(__a , _PatchedModuleObj ) and obj_attr._original_module is submodule)
):
__lowercase : int = obj_attr
# patch at top level
setattr(self.obj , __a , _PatchedModuleObj(__a , attrs=self.attrs ) )
__lowercase : List[Any] = getattr(self.obj , __a )
# construct lower levels patches
for key in submodules[i + 1 :]:
setattr(__a , __a , _PatchedModuleObj(getattr(__a , __a , __a ) , attrs=self.attrs ) )
__lowercase : Tuple = getattr(__a , __a )
# finally set the target attribute
setattr(__a , __a , self.new )
# Patch attribute itself:
# it's used for builtins like "open",
# and also to patch "os.path.join" we may also need to patch "join"
# itself if it was imported as "from os.path import join".
if submodules: # if it's an attribute of a submodule like "os.path.join"
try:
__lowercase : Optional[Any] = getattr(import_module(""".""".join(__a ) ) , __a )
except (AttributeError, ModuleNotFoundError):
return
# We iterate over all the globals in self.obj in case we find "os.path.join"
for attr in self.obj.__dir__():
# We don't check for the name of the global, but rather if its value *is* "os.path.join".
# This allows to patch renamed attributes like "from os.path import join as pjoin".
if getattr(self.obj , __a ) is attr_value:
__lowercase : Tuple = getattr(self.obj , __a )
setattr(self.obj , __a , self.new )
        elif target_attr in globals()["__builtins__"]: # if it's a builtin like "open"
__lowercase : int = globals()["""__builtins__"""][target_attr]
setattr(self.obj , __a , self.new )
else:
raise RuntimeError(F"Tried to patch attribute {target_attr} instead of a submodule." )
def __exit__( self : List[str] , *__a : Optional[int] ) -> str:
"""simple docstring"""
for attr in list(self.original ):
setattr(self.obj , __a , self.original.pop(__a ) )
def lowerCAmelCase ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
self.__enter__()
self._active_patches.append(self )
def lowerCAmelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
try:
self._active_patches.remove(self )
except ValueError:
# If the patch hasn't been started this will fail
return None
return self.__exit__()
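# Illustrative usage sketch (an addition). Assumption: the context manager
# above behaves like datasets' `patch_submodule` (its body already references
# `_PatchedModuleObj`), so that public name is used below.
import os as _os

class _Target:
    pass

_target = _Target()
_target.os = _os  # the patcher scans the target's attributes for "os"/"os.path"

def _fake_join(*parts):
    return "!".join(parts)

def _demo_patch_submodule():
    with patch_submodule(_target, "os.path.join", _fake_join):
        assert _target.os.path.join("a", "b") == "a!b"  # patched inside the context
    assert _target.os.path.join("a", "b") == _os.path.join("a", "b")  # restored on exit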
| 649
|
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
__lowercase : Optional[int] = """| <pad> <unk> <s> </s> a b c d e f g h i j k""".split()
__lowercase : List[str] = dict(zip(__a , range(len(__a ) ) ) )
__lowercase : Dict = {
"""unk_token""": """<unk>""",
"""bos_token""": """<s>""",
"""eos_token""": """</s>""",
}
__lowercase : List[str] = {
"""feature_size""": 1,
"""padding_value""": 0.0,
"""sampling_rate""": 16000,
"""return_attention_mask""": False,
"""do_normalize""": True,
}
__lowercase : Tuple = tempfile.mkdtemp()
__lowercase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__lowercase : str = os.path.join(self.tmpdirname , __a )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__a ) + """\n""" )
with open(self.feature_extraction_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__a ) + """\n""" )
# load decoder from hub
__lowercase : Optional[int] = """hf-internal-testing/ngram-beam-search-decoder"""
def lowerCAmelCase ( self : Optional[Any] , **__a : Dict ) -> Tuple:
"""simple docstring"""
__lowercase : Union[str, Any] = self.add_kwargs_tokens_map.copy()
kwargs.update(__a )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **__a )
def lowerCAmelCase ( self : str , **__a : int ) -> Tuple:
"""simple docstring"""
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **__a )
def lowerCAmelCase ( self : Union[str, Any] , **__a : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **__a )
def lowerCAmelCase ( self : int ) -> Tuple:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def lowerCAmelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
__lowercase : Optional[Any] = self.get_tokenizer()
__lowercase : Any = self.get_feature_extractor()
__lowercase : str = self.get_decoder()
__lowercase : Tuple = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
processor.save_pretrained(self.tmpdirname )
__lowercase : Tuple = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , __a )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __a )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , __a )
def lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Any = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
        # make sure the additional decoder kwargs below are applied when loading
__lowercase : str = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def lowerCAmelCase ( self : Any ) -> List[str]:
"""simple docstring"""
__lowercase : List[str] = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(["""xx"""] )
with self.assertRaisesRegex(__a , """include""" ):
WavaVecaProcessorWithLM(
tokenizer=__a , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def lowerCAmelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__lowercase : List[Any] = self.get_feature_extractor()
__lowercase : Union[str, Any] = self.get_tokenizer()
__lowercase : int = self.get_decoder()
__lowercase : int = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
__lowercase : Optional[int] = floats_list((3, 1000) )
__lowercase : List[Any] = feature_extractor(__a , return_tensors="""np""" )
__lowercase : List[str] = processor(__a , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase : List[Any] = self.get_feature_extractor()
__lowercase : int = self.get_tokenizer()
__lowercase : Dict = self.get_decoder()
__lowercase : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
__lowercase : Dict = """This is a test string"""
__lowercase : Any = processor(text=__a )
__lowercase : Dict = tokenizer(__a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCAmelCase ( self : str , __a : Tuple=(2, 10, 16) , __a : int=77 ) -> Optional[Any]:
"""simple docstring"""
np.random.seed(__a )
return np.random.rand(*__a )
def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : List[str] = self.get_feature_extractor()
__lowercase : Dict = self.get_tokenizer()
__lowercase : str = self.get_decoder()
__lowercase : int = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
__lowercase : List[str] = self._get_dummy_logits(shape=(10, 16) , seed=13 )
__lowercase : Optional[Any] = processor.decode(__a )
__lowercase : Any = decoder.decode_beams(__a )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual("""</s> <s> </s>""" , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ["""fork"""], ["""spawn"""]] )
def lowerCAmelCase ( self : List[str] , __a : Dict ) -> List[Any]:
"""simple docstring"""
__lowercase : str = self.get_feature_extractor()
__lowercase : Dict = self.get_tokenizer()
__lowercase : Optional[int] = self.get_decoder()
__lowercase : Any = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
__lowercase : Optional[Any] = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
__lowercase : Union[str, Any] = processor.batch_decode(__a )
else:
with get_context(__a ).Pool() as pool:
__lowercase : Optional[Any] = processor.batch_decode(__a , __a )
__lowercase : Union[str, Any] = list(__a )
with get_context("""fork""" ).Pool() as p:
__lowercase : Optional[Any] = decoder.decode_beams_batch(__a , __a )
__lowercase , __lowercase , __lowercase : Any = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(__a , decoded_processor.text )
self.assertListEqual(["""<s> <s> </s>""", """<s> <s> <s>"""] , decoded_processor.text )
self.assertListEqual(__a , decoded_processor.logit_score )
self.assertListEqual(__a , decoded_processor.lm_score )
def lowerCAmelCase ( self : Any ) -> Optional[int]:
"""simple docstring"""
__lowercase : int = self.get_feature_extractor()
__lowercase : Dict = self.get_tokenizer()
__lowercase : List[str] = self.get_decoder()
__lowercase : int = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
__lowercase : Dict = self._get_dummy_logits()
__lowercase : Tuple = 15
__lowercase : Tuple = -20.0
__lowercase : Dict = -4.0
__lowercase : Dict = processor.batch_decode(
__a , beam_width=__a , beam_prune_logp=__a , token_min_logp=__a , )
__lowercase : Tuple = decoded_processor_out.text
__lowercase : List[Any] = list(__a )
with get_context("""fork""" ).Pool() as pool:
__lowercase : Any = decoder.decode_beams_batch(
__a , __a , beam_width=__a , beam_prune_logp=__a , token_min_logp=__a , )
__lowercase : Optional[Any] = [d[0][0] for d in decoded_decoder_out]
__lowercase : Optional[int] = [d[0][2] for d in decoded_decoder_out]
__lowercase : Optional[int] = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(__a , __a )
self.assertListEqual(["""</s> <s> <s>""", """<s> <s> <s>"""] , __a )
self.assertTrue(np.array_equal(__a , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , __a , atol=1E-3 ) )
self.assertTrue(np.array_equal(__a , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9474] , __a , atol=1E-3 ) )
def lowerCAmelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
__lowercase : str = self.get_feature_extractor()
__lowercase : List[Any] = self.get_tokenizer()
__lowercase : List[Any] = self.get_decoder()
__lowercase : Dict = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
__lowercase : List[Any] = self._get_dummy_logits()
__lowercase : Optional[int] = 2.0
__lowercase : Tuple = 5.0
__lowercase : Optional[Any] = -20.0
__lowercase : Tuple = True
__lowercase : Union[str, Any] = processor.batch_decode(
__a , alpha=__a , beta=__a , unk_score_offset=__a , lm_score_boundary=__a , )
__lowercase : Any = decoded_processor_out.text
__lowercase : List[Any] = list(__a )
decoder.reset_params(
alpha=__a , beta=__a , unk_score_offset=__a , lm_score_boundary=__a , )
with get_context("""fork""" ).Pool() as pool:
__lowercase : Tuple = decoder.decode_beams_batch(
__a , __a , )
__lowercase : int = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(__a , __a )
self.assertListEqual(["""<s> </s> <s> </s> </s>""", """</s> </s> <s> </s> </s>"""] , __a )
__lowercase : str = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , __a )
def lowerCAmelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__lowercase : Optional[Any] = processor.decoder.model_container[processor.decoder._model_key]
__lowercase : str = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
__lowercase : int = os.listdir(__a )
__lowercase : Optional[Any] = ["""alphabet.json""", """language_model"""]
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(__a , __a )
def lowerCAmelCase ( self : str ) -> Optional[int]:
"""simple docstring"""
__lowercase : List[str] = snapshot_download("""hf-internal-testing/processor_with_lm""" )
__lowercase : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained(__a )
__lowercase : Dict = processor.decoder.model_container[processor.decoder._model_key]
__lowercase : List[Any] = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
__lowercase : Dict = os.listdir(__a )
__lowercase : List[Any] = os.listdir(__a )
local_decoder_files.sort()
expected_decoder_files.sort()
        # test that the decoder files from the hub and the local files in cache are the same
self.assertListEqual(__a , __a )
def lowerCAmelCase ( self : Tuple ) -> int:
"""simple docstring"""
__lowercase : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__lowercase : Dict = AutoProcessor.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__lowercase : Any = floats_list((3, 1000) )
__lowercase : List[str] = processor_wavaveca(__a , return_tensors="""np""" )
__lowercase : List[Any] = processor_auto(__a , return_tensors="""np""" )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
__lowercase : List[str] = self._get_dummy_logits()
__lowercase : List[str] = processor_wavaveca.batch_decode(__a )
__lowercase : Optional[int] = processor_auto.batch_decode(__a )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Any = self.get_feature_extractor()
__lowercase : Union[str, Any] = self.get_tokenizer()
__lowercase : Dict = self.get_decoder()
__lowercase : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
@staticmethod
def lowerCAmelCase ( __a : Union[str, Any] , __a : List[Any] ) -> Dict:
"""simple docstring"""
__lowercase : Any = [d[key] for d in offsets]
return retrieved_list
def lowerCAmelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
__lowercase : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__lowercase : Optional[Any] = self._get_dummy_logits()[0]
__lowercase : Dict = processor.decode(__a , output_word_offsets=__a )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(__a , __a ) )
self.assertEqual(""" """.join(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """end_offset""" ) , [1, 3, 5] )
def lowerCAmelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
__lowercase : List[str] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__lowercase : Any = self._get_dummy_logits()
__lowercase : Dict = processor.batch_decode(__a , output_word_offsets=__a )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(__a , __a ) )
self.assertListEqual(
[""" """.join(self.get_from_offsets(__a , """word""" ) ) for o in outputs["""word_offsets"""]] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """end_offset""" ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def lowerCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
import torch
__lowercase : Any = load_dataset("""common_voice""" , """en""" , split="""train""" , streaming=__a )
__lowercase : str = ds.cast_column("""audio""" , datasets.Audio(sampling_rate=16000 ) )
__lowercase : Tuple = iter(__a )
__lowercase : Union[str, Any] = next(__a )
__lowercase : int = AutoProcessor.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
__lowercase : int = WavaVecaForCTC.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
__lowercase : Union[str, Any] = processor(sample["""audio"""]["""array"""] , return_tensors="""pt""" ).input_values
with torch.no_grad():
__lowercase : List[Any] = model(__a ).logits.cpu().numpy()
__lowercase : Tuple = processor.decode(logits[0] , output_word_offsets=__a )
__lowercase : int = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
__lowercase : Optional[Any] = [
{
"""start_time""": d["""start_offset"""] * time_offset,
"""end_time""": d["""end_offset"""] * time_offset,
"""word""": d["""word"""],
}
for d in output["""word_offsets"""]
]
__lowercase : str = """WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"""
# output words
self.assertEqual(""" """.join(self.get_from_offsets(__a , """word""" ) ) , __a )
self.assertEqual(""" """.join(self.get_from_offsets(__a , """word""" ) ) , output.text )
# output times
__lowercase : Tuple = torch.tensor(self.get_from_offsets(__a , """start_time""" ) )
__lowercase : Dict = torch.tensor(self.get_from_offsets(__a , """end_time""" ) )
# fmt: off
__lowercase : List[Any] = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
__lowercase : Optional[int] = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(__a , __a , atol=0.01 ) )
self.assertTrue(torch.allclose(__a , __a , atol=0.01 ) )
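# Illustrative sketch (an addition) of the pool-ordering note above: with the
# "fork" start method, worker processes inherit objects that existed before
# the pool was created, which is why the LM/processor must be built first.
# (Assumption: a POSIX host; the "fork" start method is unavailable on Windows.)
import multiprocessing as _mp

_lm_stub = {"lm": "loaded-before-fork"}  # stands in for the decoder/LM

def _demo_worker(_):
    return _lm_stub["lm"]  # visible in the child only because it pre-dates the fork

def _demo_fork_inherits_preexisting_objects():
    with _mp.get_context("fork").Pool(2) as pool:
        assert pool.map(_demo_worker, range(2)) == ["loaded-before-fork"] * 2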
| 649
| 1
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Optional[Any] , __a : Any , __a : Any=7 , __a : Dict=3 , __a : Optional[Any]=30 , __a : int=400 , __a : Tuple=True , __a : Optional[int]=None , __a : str=0.9 , __a : Optional[Any]=None , __a : Tuple=True , __a : Union[str, Any]=[0.5, 0.5, 0.5] , __a : List[str]=[0.5, 0.5, 0.5] , ) -> Optional[int]:
"""simple docstring"""
__lowercase : Optional[int] = size if size is not None else {"""shortest_edge""": 30}
__lowercase : int = crop_size if crop_size is not None else {"""height""": 30, """width""": 30}
__lowercase : str = parent
__lowercase : Dict = batch_size
__lowercase : Tuple = num_channels
__lowercase : Optional[Any] = min_resolution
__lowercase : Tuple = max_resolution
__lowercase : List[Any] = do_resize_and_center_crop
__lowercase : Dict = size
__lowercase : Optional[Any] = crop_pct
__lowercase : Dict = crop_size
__lowercase : Optional[Any] = do_normalize
__lowercase : List[str] = image_mean
__lowercase : Optional[int] = image_std
def lowerCAmelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class lowerCAmelCase ( __a , unittest.TestCase ):
'''simple docstring'''
_A : Optional[int] = PoolFormerImageProcessor if is_vision_available() else None
def lowerCAmelCase ( self : Any ) -> int:
"""simple docstring"""
__lowercase : Optional[Any] = PoolFormerImageProcessingTester(self )
@property
def lowerCAmelCase ( self : int ) -> List[Any]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase ( self : List[str] ) -> str:
"""simple docstring"""
__lowercase : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__a , """do_resize_and_center_crop""" ) )
self.assertTrue(hasattr(__a , """size""" ) )
self.assertTrue(hasattr(__a , """crop_pct""" ) )
self.assertTrue(hasattr(__a , """do_normalize""" ) )
self.assertTrue(hasattr(__a , """image_mean""" ) )
self.assertTrue(hasattr(__a , """image_std""" ) )
def lowerCAmelCase ( self : str ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 30} )
self.assertEqual(image_processor.crop_size , {"""height""": 30, """width""": 30} )
__lowercase : Any = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
def lowerCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
pass
def lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowercase : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a )
for image in image_inputs:
self.assertIsInstance(__a , Image.Image )
# Test not batched input
__lowercase : Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
__lowercase : List[str] = image_processing(__a , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def lowerCAmelCase ( self : Any ) -> int:
"""simple docstring"""
__lowercase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowercase : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , numpify=__a )
for image in image_inputs:
self.assertIsInstance(__a , np.ndarray )
# Test not batched input
__lowercase : Any = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
__lowercase : Union[str, Any] = image_processing(__a , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
def and_gate(input_a: int, input_b: int) -> int:
    """AND gate: outputs 1 only when both inputs are 1."""
    return int((input_a, input_b).count(0) == 0)


def test_and_gate() -> None:
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
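# Added illustration (not part of the original file): the same tuple-counting
# trick generalizes to other gates. `xor_gate` below is a hypothetical
# companion function, shown only as a sketch of the pattern.
def xor_gate(input_a: int, input_b: int) -> int:
    # XOR is true when exactly one of the inputs is 1
    return int((input_a, input_b).count(1) == 1)


assert xor_gate(0, 0) == 0 and xor_gate(1, 1) == 0
assert xor_gate(0, 1) == 1 and xor_gate(1, 0) == 1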
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model):
    # Initialise PyTorch model
    config = FunnelConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--base_model''', action='''store_true''', help='''Whether you want just the base model (no decoder) or not.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
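# Example invocation (added; the paths below are illustrative placeholders):
#   python convert_funnel_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/model.ckpt \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/pytorch_model.bin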
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be run first on your machine."


def get_user_input():
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config


def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser


def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")


def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)
if __name__ == "__main__":
main()
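# Typical usage (added; illustrative): running `accelerate config` walks through
# the interactive prompts and writes the answers to default_config.yaml, while
# `accelerate config --config_file my_config.yaml` writes them to a custom path.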
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    UniSpeechConfig,
    UniSpeechForCTC,
    UniSpeechForPreTraining,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2PhonemeCTCTokenizer,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''ctc_proj''',
'''mask_emb''': '''masked_spec_embed''',
}
TOP_LEVEL_KEYS = [
'''ctc_proj''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    for attribute in key.split("."):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return

            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = "lm_head"

        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 42
            vocab_dict["<s>"] = 43
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2PhonemeCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1]), "w2v_path": checkpoint_path}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()
    recursively_load_weights(model, hf_unispeech, is_finetuned)
    hf_unispeech.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
    args = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
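# Example invocation (added; the paths below are illustrative placeholders):
#   python convert_unispeech_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path /path/to/unispeech.pt \
#       --pytorch_dump_folder_path ./unispeech-hf \
#       --dict_path /path/to/dict.ltr.txt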
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    """Return every ordered combination of words from word_bank that concatenates to target."""
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1

    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])

    # seed value
    table[0] = [[]]  # because empty string has empty combination

    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now,push that combination to the table[i+len(word)]
                    table[i + len(word)] += new_combinations

    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()

    return table[len(target)]
if __name__ == "__main__":
print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
print(
all_construct(
'''hexagonosaurus''',
['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
)
)
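# Complexity note (added): with n = len(target) and m words in the word bank,
# the table construction performs O(n * m) slice comparisons, but the returned
# list of combinations can itself grow exponentially in n for highly ambiguous
# targets, so the overall cost is output-sensitive.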
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/table-transformer-detection": (
        "https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
    ),
}
class TableTransformerConfig(PretrainedConfig):
    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model


class TableTransformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
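# Usage sketch (added; illustrative only):
#   from transformers import TableTransformerConfig, TableTransformerModel
#   config = TableTransformerConfig(num_queries=50)
#   model = TableTransformerModel(config)  # randomly initialized model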
def or_gate(input_a: int, input_b: int) -> int:
    """OR gate: outputs 1 when at least one input is 1."""
    return int((input_a, input_b).count(1) != 0)


def test_or_gate() -> None:
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_description = "Run commands across TPU VMs for initial setup before running `accelerate launch`."


def tpu_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("tpu-config", description=_description)
    else:
        parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description)
    # Core arguments
    config_args = parser.add_argument_group(
        "Config Arguments", "Arguments that can be configured through `accelerate config`."
    )
    config_args.add_argument(
        "--config_file", type=str, default=None, help="Path to the config file to use for accelerate."
    )
    config_args.add_argument(
        "--tpu_name",
        default=None,
        help="The name of the TPU to use. If not specified, will use the TPU specified in the config file.",
    )
    config_args.add_argument(
        "--tpu_zone",
        default=None,
        help="The zone of the TPU to use. If not specified, will use the zone specified in the config file.",
    )
    pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options ran inside the TPU.")
    pod_args.add_argument(
        "--use_alpha",
        action="store_true",
        help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.",
    )
    pod_args.add_argument(
        "--command_file",
        default=None,
        help="The path to the file containing the commands to run on the pod on startup.",
    )
    pod_args.add_argument(
        "--command", action="append", nargs="+", help="A command to run on the pod. Can be passed multiple times."
    )
    pod_args.add_argument(
        "--install_accelerate",
        action="store_true",
        help="Whether to install accelerate on the pod. Defaults to False.",
    )
    pod_args.add_argument(
        "--accelerate_version",
        default="latest",
        help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.",
    )
    pod_args.add_argument(
        "--debug", action="store_true", help="If set, will print the command that would be run instead of running it."
    )

    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher)
    return parser
def tpu_command_launcher(args):
    defaults = None

    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
        if not args.command_file and defaults.command_file is not None and not args.command:
            args.command_file = defaults.command_file
        if not args.command and defaults.commands is not None:
            args.command = defaults.commands
        if not args.tpu_name:
            args.tpu_name = defaults.tpu_name
        if not args.tpu_zone:
            args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        args.accelerate_version = "accelerate -U"
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = f"accelerate=={args.accelerate_version}"

    if not args.command_file and not args.command:
        raise ValueError("You must specify either a command file or a command to run on the pod.")

    if args.command_file:
        with open(args.command_file, "r") as f:
            args.command = [f.read().splitlines()]

    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]

    # Default to the shared folder and install accelerate
    new_cmd = ["cd /usr/share"]
    if args.install_accelerate:
        new_cmd += [f"pip install {args.accelerate_version}"]
    new_cmd += args.command
    args.command = "; ".join(new_cmd)

    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["gcloud"]
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(f"Running {' '.join(cmd)}")
        return
    subprocess.run(cmd)
    print("Successfully setup pod.")


def main():
    parser = tpu_command_parser()
    args = parser.parse_args()

    tpu_command_launcher(args)
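# Example invocation (added; the flag values are illustrative placeholders):
#   accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central1-a \
#       --command "pip list" --install_accelerate --debug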
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_funnel''': ['''FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FunnelConfig'''],
'''convert_funnel_original_tf_checkpoint_to_pytorch''': [],
'''tokenization_funnel''': ['''FunnelTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_funnel"] = [
'''FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FunnelBaseModel''',
'''FunnelForMaskedLM''',
'''FunnelForMultipleChoice''',
'''FunnelForPreTraining''',
'''FunnelForQuestionAnswering''',
'''FunnelForSequenceClassification''',
'''FunnelForTokenClassification''',
'''FunnelModel''',
'''FunnelPreTrainedModel''',
'''load_tf_weights_in_funnel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_funnel"] = [
'''TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFFunnelBaseModel''',
'''TFFunnelForMaskedLM''',
'''TFFunnelForMultipleChoice''',
'''TFFunnelForPreTraining''',
'''TFFunnelForQuestionAnswering''',
'''TFFunnelForSequenceClassification''',
'''TFFunnelForTokenClassification''',
'''TFFunnelModel''',
'''TFFunnelPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
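# Note (added): `_LazyModule` defers the heavy torch/tensorflow imports until a
# symbol is first accessed, so `import transformers.models.funnel` stays cheap
# even when both backends are installed.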
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class RobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RobertaTokenizer
    rust_tokenizer_class = RobertaTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"cls_token": "<s>"}
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return RobertaTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def roberta_dict_integration_testing(self):
        tokenizer = self.get_tokenizer()
        self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=True), [0, 31414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418", add_special_tokens=True),
            [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2],
        )
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("roberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()

        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)

        tokenizer.add_special_tokens({"bos_token": "<s>"})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)

        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)}
        )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)

        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"

        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)
    def test_pretokenized_inputs(self):
        # intentionally skipped for this tokenizer
        pass
    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
            )

            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())

            self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["trim_offsets"], trim_offsets)
    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
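                # Note (added for clarity): with trim_offsets=False the whitespace in
                # front of a token stays inside its reported span, which is why the
                # expected offsets above include the leading space, e.g.
                # (0, 1 + len(text_of_1_token)), instead of being trimmed to the
                # visible characters.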
import logging
import os
import threading
import time
try:
    import warnings
except ImportError:
    warnings = None

try:
    import msvcrt
except ImportError:
    msvcrt = None

try:
    import fcntl
except ImportError:
    fcntl = None
# Backward compatibility
# ------------------------------------------------
try:
    TimeoutError
except NameError:
    TimeoutError = OSError
# Data
# ------------------------------------------------
__all__ = [
'''Timeout''',
'''BaseFileLock''',
'''WindowsFileLock''',
'''UnixFileLock''',
'''SoftFileLock''',
'''FileLock''',
]
__version__ = "3.0.12"

_logger = None
def logger():
    """Returns the logger instance used in this module."""
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger
class Timeout(TimeoutError):
    """Raised when the lock could not be acquired in *timeout* seconds."""

    def __init__(self, lock_file):
        #: The path of the file lock.
        self.lock_file = lock_file
        return None

    def __str__(self):
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp
class _Acquire_ReturnProxy:
    """Context-manager proxy returned by :meth:`BaseFileLock.acquire`."""

    def __init__(self, lock):
        self.lock = lock
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None
class BaseFileLock:
    """Implements the base class of a file lock."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)
        # The path to the lock file.
        self._lock_file = lock_file
        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None
        # The default timeout value.
        self.timeout = timeout
        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()
        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None

    @property
    def lock_file(self):
        """The path to the lock file."""
        return self._lock_file

    @property
    def timeout(self):
        """The default timeout value."""
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None

    def _acquire(self):
        """Platform-dependent lock acquisition; implemented in subclasses."""
        raise NotImplementedError()

    def _release(self):
        """Platform-dependent lock release; implemented in subclasses."""
        raise NotImplementedError()

    @property
    def is_locked(self):
        """True if the object currently holds the file lock."""
        return self._lock_file_fd is not None
    def acquire(self, timeout=None, poll_intervall=0.05):
        """Acquires the file lock or fails with a :exc:`Timeout` error."""
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout

        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()

                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..."
                    )
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)
    def release(self, force=False):
        """Releases the file lock; with force=True the lock is released even if the counter is not 0."""
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1

                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file

                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")
        return None

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        self.release(force=True)
        return None

    def hash_filename_if_too_long(self, path: str, max_length: int) -> str:
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path
class WindowsFileLock(BaseFileLock):
    """Uses the :func:`msvcrt.locking` function to hard lock the lock file on Windows systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)

        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class UnixFileLock(BaseFileLock):
    """Uses the :func:`fcntl.flock` function to hard lock the lock file on Unix systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)
        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None
class SoftFileLock(BaseFileLock):
    """Simply watches the existence of the lock file."""

    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None

        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None
FileLock = None

if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock

    if warnings is not None:
        warnings.warn("only soft file lock is available")
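# Usage sketch (added; illustrative only). The `FileLock` alias above resolves
# to the strongest backend available on the platform, and locks are re-entrant
# thanks to the internal counter:
#
#     lock = FileLock("resource.txt.lock", timeout=5)
#     with lock:
#         ...  # critical section; released automatically on exit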
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class Conversation:
    def __init__(
        self, text: str = None, conversation_id: uuid.UUID = None, past_user_inputs=None, generated_responses=None
    ):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []

        self.uuid: uuid.UUID = conversation_id
        self.past_user_inputs: List[str] = past_user_inputs
        self.generated_responses: List[str] = generated_responses
        self.new_user_input: Optional[str] = text

    def __eq__(self, other):
        if not isinstance(other, Conversation):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )
    def add_user_input(self, text: str, overwrite: bool = False):
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
                    f'with: "{text}".'
                )
                self.new_user_input = text
            else:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
                    f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input'
                )
        else:
            self.new_user_input = text

    def mark_processed(self):
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response: str):
        self.generated_responses.append(response)

    def iter_texts(self):
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self) -> str:
        output = f"Conversation id: {self.uuid} \n"
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += f"{name} >> {text} \n"
        return output
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        min_length_for_response (`int`, *optional*, defaults to 32):
            The minimum length (in number of tokens) for a response.
        minimum_tokens (`int`, *optional*, defaults to 10):
            The minimum length of tokens to leave for a response.
    """,
)
class ConversationalPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

    def _sanitize_parameters(
        self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs
    ):
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}

        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens

        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params
    def __call__(self, conversations: Union[Conversation, List[Conversation]], num_workers=0, **kwargs):
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs

    def preprocess(self, conversation: Conversation, min_length_for_response=32) -> Dict[str, Any]:
        if not isinstance(conversation, Conversation):
            raise ValueError("ConversationalPipeline, expects Conversation as inputs")
        if conversation.new_user_input is None:
            raise ValueError(
                f"Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. "
                "Add user inputs with the conversation's `add_user_input` method"
            )
        if hasattr(self.tokenizer, "_build_conversation_input_ids"):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)

        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}
    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        max_length = generate_kwargs.get("max_length", self.model.config.max_length)

        n = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f"Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})")
            trim = max_length - minimum_tokens
            model_inputs["input_ids"] = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["attention_mask"] = model_inputs["attention_mask"][:, -trim:]
        conversation = model_inputs.pop("conversation")
        generate_kwargs["max_length"] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        output_ids = model_outputs["output_ids"]
        answer = self.tokenizer.decode(
            output_ids[0],
            skip_special_tokens=True,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
        )
        conversation = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation

    def _legacy_parse_and_tokenize(self, conversation: Conversation) -> List[int]:
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))

        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
| 649
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
lowerCamelCase : Optional[int] = logging.get_logger(__name__)
lowerCamelCase : Tuple = {
'''microsoft/layoutlmv3-base''': '''https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json''',
}
class lowerCAmelCase ( __a ):
'''simple docstring'''
_A : int = '''layoutlmv3'''
def __init__( self : Dict , __a : List[str]=50265 , __a : str=768 , __a : List[Any]=12 , __a : List[Any]=12 , __a : List[str]=3072 , __a : Optional[Any]="gelu" , __a : Optional[int]=0.1 , __a : List[Any]=0.1 , __a : Tuple=512 , __a : int=2 , __a : Any=0.02 , __a : Union[str, Any]=1E-5 , __a : List[str]=1 , __a : List[Any]=0 , __a : int=2 , __a : str=1024 , __a : str=128 , __a : List[Any]=128 , __a : Tuple=True , __a : Optional[int]=32 , __a : Any=128 , __a : List[Any]=64 , __a : Tuple=256 , __a : str=True , __a : int=True , __a : Optional[Any]=True , __a : Any=224 , __a : str=3 , __a : List[str]=16 , __a : Union[str, Any]=None , **__a : List[Any] , ) -> List[str]:
"""simple docstring"""
super().__init__(
vocab_size=__a , hidden_size=__a , num_hidden_layers=__a , num_attention_heads=__a , intermediate_size=__a , hidden_act=__a , hidden_dropout_prob=__a , attention_probs_dropout_prob=__a , max_position_embeddings=__a , type_vocab_size=__a , initializer_range=__a , layer_norm_eps=__a , pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a , )
__lowercase : int = max_ad_position_embeddings
__lowercase : Any = coordinate_size
__lowercase : Optional[Any] = shape_size
__lowercase : str = has_relative_attention_bias
__lowercase : int = rel_pos_bins
__lowercase : Union[str, Any] = max_rel_pos
__lowercase : str = has_spatial_attention_bias
__lowercase : str = rel_ad_pos_bins
__lowercase : List[Any] = max_rel_ad_pos
__lowercase : Tuple = text_embed
__lowercase : int = visual_embed
__lowercase : Tuple = input_size
__lowercase : Dict = num_channels
__lowercase : str = patch_size
__lowercase : Optional[int] = classifier_dropout
class lowerCAmelCase ( __a ):
'''simple docstring'''
_A : str = version.parse('''1.12''' )
@property
def lowerCAmelCase ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
else:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels"""}),
] )
@property
def lowerCAmelCase ( self : Union[str, Any] ) -> float:
"""simple docstring"""
return 1E-5
@property
def lowerCAmelCase ( self : str ) -> int:
"""simple docstring"""
return 12
def lowerCAmelCase ( self : List[Any] , __a : "ProcessorMixin" , __a : int = -1 , __a : int = -1 , __a : bool = False , __a : Optional["TensorType"] = None , __a : int = 3 , __a : int = 40 , __a : int = 40 , ) -> Mapping[str, Any]:
"""simple docstring"""
setattr(processor.image_processor , """apply_ocr""" , __a )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__lowercase : Tuple = compute_effective_axis_dimension(
__a , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__lowercase : Tuple = processor.tokenizer.num_special_tokens_to_add(__a )
__lowercase : Tuple = compute_effective_axis_dimension(
__a , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__a )
# Generate dummy inputs according to compute batch and sequence
__lowercase : Union[str, Any] = [[""" """.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
__lowercase : Tuple = [[[48, 84, 73, 128]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
__lowercase : Tuple = self._generate_dummy_images(__a , __a , __a , __a )
__lowercase : int = dict(
processor(
__a , text=__a , boxes=__a , return_tensors=__a , ) )
return inputs
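# Export sketch. Assuming this mirrors the public `transformers.onnx` export flow,
# the config above would be used roughly as follows (the names below are
# illustrative, not taken from this file):
#
#     from pathlib import Path
#     from transformers.onnx import export
#
#     onnx_config = LayoutLMv3OnnxConfig(model.config, task="question-answering")
#     export(processor, model, onnx_config, onnx_config.default_onnx_opset, Path("model.onnx"))
#
# `generate_dummy_inputs` above builds the tracing inputs (dummy words, boxes and
# images), which is why it overrides the processor's `apply_ocr` flag first.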
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCamelCase : Optional[int] = {
'''configuration_altclip''': [
'''ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AltCLIPConfig''',
'''AltCLIPTextConfig''',
'''AltCLIPVisionConfig''',
],
'''processing_altclip''': ['''AltCLIPProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Any = [
'''ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AltCLIPPreTrainedModel''',
'''AltCLIPModel''',
'''AltCLIPTextModel''',
'''AltCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
lowerCamelCase : List[Any] = logging.get_logger(__name__)
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : List[str] , __a : str = None , __a : uuid.UUID = None , __a : Any=None , __a : List[Any]=None ) -> List[Any]:
"""simple docstring"""
if not conversation_id:
            __lowercase : Any = uuid.uuid4()
if past_user_inputs is None:
__lowercase : Dict = []
if generated_responses is None:
__lowercase : Dict = []
__lowercase : uuid.UUID = conversation_id
__lowercase : List[str] = past_user_inputs
__lowercase : List[str] = generated_responses
__lowercase : Optional[str] = text
def __eq__( self : Dict , __a : Dict ) -> Any:
"""simple docstring"""
if not isinstance(__a , __a ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def lowerCAmelCase ( self : List[str] , __a : str , __a : bool = False ) -> Dict:
"""simple docstring"""
if self.new_user_input:
if overwrite:
                logger.warning(
                    F"User input added while unprocessed input still exists: \"{self.new_user_input}\" was "
                    F"overwritten with: \"{text}\"." )
__lowercase : Optional[int] = text
else:
                logger.warning(
                    F"User input added while unprocessed input still exists: \"{self.new_user_input}\" kept, new "
                    F"input ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input." )
else:
__lowercase : Dict = text
def lowerCAmelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
__lowercase : Dict = None
def lowerCAmelCase ( self : Optional[int] , __a : str ) -> List[Any]:
"""simple docstring"""
self.generated_responses.append(__a )
def lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self : int ) -> str:
"""simple docstring"""
__lowercase : Optional[int] = F"Conversation id: {self.uuid} \n"
for is_user, text in self.iter_texts():
__lowercase : Optional[Any] = """user""" if is_user else """bot"""
output += F"{name} >> {text} \n"
return output
@add_end_docstrings(
__a , r'''
min_length_for_response (`int`, *optional*, defaults to 32):
The minimum length (in number of tokens) for a response.
minimum_tokens (`int`, *optional*, defaults to 10):
The minimum length of tokens to leave for a response.
''' , )
class lowerCAmelCase ( __a ):
'''simple docstring'''
def __init__( self : Any , *__a : int , **__a : str ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(*__a , **__a )
if self.tokenizer.pad_token_id is None:
__lowercase : List[Any] = self.tokenizer.eos_token
def lowerCAmelCase ( self : Union[str, Any] , __a : int=None , __a : Tuple=None , __a : Any=None , **__a : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowercase : List[str] = {}
__lowercase : Tuple = {}
__lowercase : List[str] = {}
if min_length_for_response is not None:
__lowercase : Dict = min_length_for_response
if minimum_tokens is not None:
__lowercase : Union[str, Any] = minimum_tokens
if "max_length" in generate_kwargs:
__lowercase : Union[str, Any] = generate_kwargs["""max_length"""]
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
__lowercase : Union[str, Any] = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(__a )
return preprocess_params, forward_params, postprocess_params
def __call__( self : Optional[int] , __a : Union[Conversation, List[Conversation]] , __a : Dict=0 , **__a : List[Any] ) -> Tuple:
"""simple docstring"""
__lowercase : List[Any] = super().__call__(__a , num_workers=__a , **__a )
if isinstance(__a , __a ) and len(__a ) == 1:
return outputs[0]
return outputs
def lowerCAmelCase ( self : Union[str, Any] , __a : Conversation , __a : Tuple=32 ) -> Dict[str, Any]:
"""simple docstring"""
if not isinstance(__a , __a ):
raise ValueError("""ConversationalPipeline, expects Conversation as inputs""" )
if conversation.new_user_input is None:
raise ValueError(
F"Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. "
"""Add user inputs with the conversation's `add_user_input` method""" )
if hasattr(self.tokenizer , """_build_conversation_input_ids""" ):
__lowercase : List[Any] = self.tokenizer._build_conversation_input_ids(__a )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
__lowercase : Tuple = self._legacy_parse_and_tokenize(__a )
if self.framework == "pt":
__lowercase : List[Any] = torch.LongTensor([input_ids] )
elif self.framework == "tf":
__lowercase : List[str] = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def lowerCAmelCase ( self : Any , __a : Dict , __a : Any=10 , **__a : Dict ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Optional[int] = generate_kwargs.get("""max_length""" , self.model.config.max_length )
__lowercase : List[Any] = model_inputs["""input_ids"""].shape[1]
if max_length - minimum_tokens < n:
logger.warning(F"Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})" )
__lowercase : Any = max_length - minimum_tokens
__lowercase : int = model_inputs["""input_ids"""][:, -trim:]
if "attention_mask" in model_inputs:
__lowercase : Dict = model_inputs["""attention_mask"""][:, -trim:]
__lowercase : Union[str, Any] = model_inputs.pop("""conversation""" )
__lowercase : Tuple = max_length
__lowercase : int = self.model.generate(**__a , **__a )
if self.model.config.is_encoder_decoder:
__lowercase : Optional[int] = 1
else:
__lowercase : str = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def lowerCAmelCase ( self : int , __a : Tuple , __a : List[Any]=True ) -> List[str]:
"""simple docstring"""
__lowercase : int = model_outputs["""output_ids"""]
__lowercase : Union[str, Any] = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=__a , clean_up_tokenization_spaces=__a , )
__lowercase : List[str] = model_outputs["""conversation"""]
conversation.mark_processed()
conversation.append_response(__a )
return conversation
def lowerCAmelCase ( self : int , __a : Conversation ) -> Dict:
"""simple docstring"""
__lowercase : Optional[int] = self.tokenizer.eos_token_id
__lowercase : Optional[Any] = []
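        # Flatten the whole dialogue history into one token sequence, terminating each
        # turn with EOS (when the tokenizer defines one) so turn boundaries survive.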
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(__a , add_special_tokens=__a ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(__a , add_special_tokens=__a ) )
if len(__a ) > self.tokenizer.model_max_length:
__lowercase : List[Any] = input_ids[-self.tokenizer.model_max_length :]
return input_ids
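# Usage sketch. The identifiers in this file are mangled, so the example below targets
# the equivalent public `transformers` API that this code mirrors (`Conversation` plus
# `pipeline("conversational")`); the model name is illustrative only:
#
#     from transformers import Conversation, pipeline
#
#     chatbot = pipeline("conversational", model="microsoft/DialoGPT-small")
#     conversation = Conversation("What is the best way to learn Python?")
#     conversation = chatbot(conversation)
#     print(conversation.generated_responses[-1])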
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : List[Any] , __a : Dict , __a : Optional[Any]=13 , __a : Optional[Any]=32 , __a : Union[str, Any]=3 , __a : int=4 , __a : Union[str, Any]=[10, 20, 30, 40] , __a : Optional[Any]=[2, 2, 3, 2] , __a : int=True , __a : str=True , __a : Dict=37 , __a : int="gelu" , __a : Union[str, Any]=10 , __a : Dict=0.02 , __a : Optional[int]=["stage2", "stage3", "stage4"] , __a : Optional[int]=[2, 3, 4] , __a : str=None , ) -> str:
"""simple docstring"""
__lowercase : Optional[Any] = parent
__lowercase : Union[str, Any] = batch_size
__lowercase : int = image_size
__lowercase : Any = num_channels
__lowercase : str = num_stages
__lowercase : str = hidden_sizes
__lowercase : Dict = depths
__lowercase : int = is_training
__lowercase : Dict = use_labels
__lowercase : Optional[int] = intermediate_size
__lowercase : List[Any] = hidden_act
__lowercase : int = num_labels
__lowercase : int = initializer_range
__lowercase : str = out_features
__lowercase : Dict = out_indices
__lowercase : List[str] = scope
def lowerCAmelCase ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
__lowercase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase : Tuple = None
if self.use_labels:
__lowercase : str = ids_tensor([self.batch_size] , self.num_labels )
__lowercase : str = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase ( self : Any ) -> Dict:
"""simple docstring"""
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=__a , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def lowerCAmelCase ( self : Union[str, Any] , __a : Optional[int] , __a : Dict , __a : List[str] ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = ConvNextVaModel(config=__a )
model.to(__a )
model.eval()
__lowercase : Any = model(__a )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowerCAmelCase ( self : Optional[Any] , __a : Optional[Any] , __a : Optional[int] , __a : Dict ) -> List[str]:
"""simple docstring"""
__lowercase : str = ConvNextVaForImageClassification(__a )
model.to(__a )
model.eval()
__lowercase : List[str] = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase ( self : Optional[Any] , __a : str , __a : Optional[Any] , __a : Dict ) -> Any:
"""simple docstring"""
__lowercase : Union[str, Any] = ConvNextVaBackbone(config=__a )
model.to(__a )
model.eval()
__lowercase : List[Any] = model(__a )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
__lowercase : Union[str, Any] = None
__lowercase : Dict = ConvNextVaBackbone(config=__a )
model.to(__a )
model.eval()
__lowercase : Optional[int] = model(__a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def lowerCAmelCase ( self : str ) -> Any:
"""simple docstring"""
__lowercase : Tuple = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase : str = config_and_inputs
__lowercase : int = {"""pixel_values""": pixel_values}
return config, inputs_dict
def lowerCAmelCase ( self : int ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase : str = config_and_inputs
__lowercase : List[str] = {"""pixel_values""": pixel_values, """labels""": labels}
return config, inputs_dict
@require_torch
class lowerCAmelCase ( __a , __a , unittest.TestCase ):
'''simple docstring'''
_A : Dict = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
_A : str = (
{'''feature-extraction''': ConvNextVaModel, '''image-classification''': ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
_A : Optional[int] = False
_A : Tuple = False
_A : Dict = False
_A : Optional[int] = False
_A : int = False
def lowerCAmelCase ( self : Dict ) -> str:
"""simple docstring"""
__lowercase : Optional[Any] = ConvNextVaModelTester(self )
__lowercase : Optional[int] = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 )
def lowerCAmelCase ( self : int ) -> List[str]:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCAmelCase ( self : int ) -> Tuple:
"""simple docstring"""
return
@unittest.skip(reason="""ConvNextV2 does not use inputs_embeds""" )
def lowerCAmelCase ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip(reason="""ConvNextV2 does not support input and output embeddings""" )
def lowerCAmelCase ( self : str ) -> Dict:
"""simple docstring"""
pass
@unittest.skip(reason="""ConvNextV2 does not use feedforward chunking""" )
def lowerCAmelCase ( self : str ) -> int:
"""simple docstring"""
pass
def lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
__lowercase , __lowercase : str = self.model_tester.prepare_config_and_inputs_with_labels()
__lowercase : str = True
if model_class.__name__ in [
*get_values(__a ),
*get_values(__a ),
]:
continue
__lowercase : str = model_class(__a )
model.to(__a )
model.train()
__lowercase : str = self._prepare_for_class(__a , __a , return_labels=__a )
__lowercase : Tuple = model(**__a ).loss
loss.backward()
def lowerCAmelCase ( self : str ) -> Any:
"""simple docstring"""
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
__lowercase , __lowercase : List[str] = self.model_tester.prepare_config_and_inputs_with_labels()
__lowercase : int = False
__lowercase : Optional[Any] = True
if (
model_class.__name__
in [*get_values(__a ), *get_values(__a )]
or not model_class.supports_gradient_checkpointing
):
continue
__lowercase : Any = model_class(__a )
model.to(__a )
model.gradient_checkpointing_enable()
model.train()
__lowercase : Dict = self._prepare_for_class(__a , __a , return_labels=__a )
__lowercase : int = model(**__a ).loss
loss.backward()
def lowerCAmelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
__lowercase , __lowercase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase : List[Any] = model_class(__a )
__lowercase : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase : Optional[int] = [*signature.parameters.keys()]
__lowercase : Any = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __a )
def lowerCAmelCase ( self : str ) -> List[str]:
"""simple docstring"""
__lowercase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def lowerCAmelCase ( self : Optional[int] ) -> Any:
"""simple docstring"""
def check_hidden_states_output(__a : Dict , __a : List[str] , __a : Tuple ):
__lowercase : Optional[Any] = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
__lowercase : List[Any] = model(**self._prepare_for_class(__a , __a ) )
__lowercase : int = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__lowercase : Optional[Any] = self.model_tester.num_stages
self.assertEqual(len(__a ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__lowercase , __lowercase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase : Tuple = True
check_hidden_states_output(__a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase : List[Any] = True
check_hidden_states_output(__a , __a , __a )
def lowerCAmelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowercase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
@slow
def lowerCAmelCase ( self : int ) -> List[Any]:
"""simple docstring"""
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase : Dict = ConvNextVaModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def snake_case_ ( ):
__lowercase : Optional[int] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
return AutoImageProcessor.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ) if is_vision_available() else None
@slow
def lowerCAmelCase ( self : Tuple ) -> Dict:
"""simple docstring"""
__lowercase : List[Any] = ConvNextVaForImageClassification.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ).to(__a )
__lowercase : Union[str, Any] = self.default_image_processor
__lowercase : List[Any] = prepare_img()
__lowercase : Union[str, Any] = preprocessor(images=__a , return_tensors="""pt""" ).to(__a )
# forward pass
with torch.no_grad():
__lowercase : int = model(**__a )
# verify the logits
__lowercase : Union[str, Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __a )
__lowercase : Dict = torch.tensor([0.9996, 0.1966, -0.4386] ).to(__a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1E-4 ) )
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class lowerCAmelCase ( __a ):
'''simple docstring'''
def lowerCAmelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowercase : str = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__a , """tf_padding""" ) )
self.parent.assertTrue(hasattr(__a , """depth_multiplier""" ) )
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : Union[str, Any] , __a : Tuple , __a : str=13 , __a : Dict=3 , __a : List[Any]=32 , __a : Any=0.25 , __a : Any=8 , __a : Optional[int]=8 , __a : Optional[int]=6 , __a : Dict=32 , __a : Tuple=True , __a : List[Any]=True , __a : Optional[int]=True , __a : Tuple="relu6" , __a : Optional[Any]=1280 , __a : str=0.1 , __a : str=0.02 , __a : Optional[Any]=True , __a : Tuple=True , __a : Dict=10 , __a : Optional[Any]=None , ) -> Any:
"""simple docstring"""
__lowercase : List[str] = parent
__lowercase : Tuple = batch_size
__lowercase : Dict = num_channels
__lowercase : Optional[int] = image_size
__lowercase : int = depth_multiplier
__lowercase : str = depth_divisible_by
__lowercase : int = min_depth
__lowercase : Tuple = expand_ratio
__lowercase : Optional[int] = tf_padding
__lowercase : Dict = output_stride
__lowercase : Dict = first_layer_is_expansion
__lowercase : Optional[Any] = finegrained_output
__lowercase : str = hidden_act
__lowercase : Union[str, Any] = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier )
__lowercase : Optional[int] = classifier_dropout_prob
__lowercase : int = use_labels
__lowercase : Optional[int] = is_training
__lowercase : Dict = num_labels
__lowercase : Tuple = initializer_range
__lowercase : Optional[Any] = scope
def lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase : List[Any] = None
__lowercase : Optional[Any] = None
if self.use_labels:
__lowercase : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
__lowercase : Optional[int] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__lowercase : List[Any] = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowerCAmelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def lowerCAmelCase ( self : Tuple , __a : Dict , __a : Tuple , __a : Optional[int] , __a : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : Optional[int] = MobileNetVaModel(config=__a )
model.to(__a )
model.eval()
__lowercase : Tuple = model(__a )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
self.parent.assertEqual(
result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , )
def lowerCAmelCase ( self : List[str] , __a : Optional[int] , __a : List[str] , __a : str , __a : Optional[int] ) -> Tuple:
"""simple docstring"""
__lowercase : List[Any] = self.num_labels
__lowercase : Dict = MobileNetVaForImageClassification(__a )
model.to(__a )
model.eval()
__lowercase : Dict = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase ( self : int , __a : List[str] , __a : Tuple , __a : Any , __a : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowercase : int = self.num_labels
__lowercase : List[Any] = MobileNetVaForSemanticSegmentation(__a )
model.to(__a )
model.eval()
__lowercase : Dict = model(__a )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
__lowercase : str = model(__a , labels=__a )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowerCAmelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
__lowercase : List[str] = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase , __lowercase : List[str] = config_and_inputs
__lowercase : List[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase ( __a , __a , unittest.TestCase ):
'''simple docstring'''
_A : Tuple = (
(MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
if is_torch_available()
else ()
)
_A : Optional[Any] = (
{
'''feature-extraction''': MobileNetVaModel,
'''image-classification''': MobileNetVaForImageClassification,
'''image-segmentation''': MobileNetVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
_A : Tuple = False
_A : List[str] = False
_A : List[str] = False
_A : Optional[int] = False
def lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = MobileNetVaModelTester(self )
__lowercase : int = MobileNetVaConfigTester(self , config_class=__a , has_text_modality=__a )
def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileNetV2 does not use inputs_embeds""" )
def lowerCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip(reason="""MobileNetV2 does not support input and output embeddings""" )
def lowerCAmelCase ( self : Any ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip(reason="""MobileNetV2 does not output attentions""" )
def lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
pass
def lowerCAmelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
__lowercase , __lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase : List[Any] = model_class(__a )
__lowercase : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase : int = [*signature.parameters.keys()]
__lowercase : Any = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __a )
def lowerCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
__lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def lowerCAmelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
def check_hidden_states_output(__a : List[Any] , __a : Tuple , __a : List[str] ):
__lowercase : Optional[Any] = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
__lowercase : List[Any] = model(**self._prepare_for_class(__a , __a ) )
__lowercase : Tuple = outputs.hidden_states
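            # The MobileNetV2 backbone emits a fixed number of intermediate feature maps,
            # so the expected count is hard-coded rather than derived from the config.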
__lowercase : str = 16
self.assertEqual(len(__a ) , __a )
__lowercase , __lowercase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase : Any = True
check_hidden_states_output(__a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase : Union[str, Any] = True
check_hidden_states_output(__a , __a , __a )
def lowerCAmelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__lowercase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
def lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
__lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__a )
@slow
def lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase : Optional[int] = MobileNetVaModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def snake_case_ ( ):
__lowercase : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
return (
MobileNetVaImageProcessor.from_pretrained("""google/mobilenet_v2_1.0_224""" ) if is_vision_available() else None
)
@slow
def lowerCAmelCase ( self : str ) -> int:
"""simple docstring"""
__lowercase : Tuple = MobileNetVaForImageClassification.from_pretrained("""google/mobilenet_v2_1.0_224""" ).to(__a )
__lowercase : str = self.default_image_processor
__lowercase : Tuple = prepare_img()
__lowercase : Tuple = image_processor(images=__a , return_tensors="""pt""" ).to(__a )
# forward pass
with torch.no_grad():
__lowercase : str = model(**__a )
# verify the logits
__lowercase : Union[str, Any] = torch.Size((1, 1001) )
self.assertEqual(outputs.logits.shape , __a )
__lowercase : str = torch.tensor([0.2445, -1.1993, 0.1905] ).to(__a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1E-4 ) )
@slow
def lowerCAmelCase ( self : Tuple ) -> Any:
"""simple docstring"""
__lowercase : int = MobileNetVaForSemanticSegmentation.from_pretrained("""google/deeplabv3_mobilenet_v2_1.0_513""" )
__lowercase : Dict = model.to(__a )
__lowercase : Tuple = MobileNetVaImageProcessor.from_pretrained("""google/deeplabv3_mobilenet_v2_1.0_513""" )
__lowercase : List[str] = prepare_img()
__lowercase : Optional[int] = image_processor(images=__a , return_tensors="""pt""" ).to(__a )
# forward pass
with torch.no_grad():
__lowercase : Union[str, Any] = model(**__a )
__lowercase : Any = outputs.logits
# verify the logits
__lowercase : Dict = torch.Size((1, 21, 65, 65) )
self.assertEqual(logits.shape , __a )
__lowercase : str = torch.tensor(
[
[[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
[[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
[[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
] , device=__a , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __a , atol=1E-4 ) )
def snake_case_ ( lowerCAmelCase_ : int , lowerCAmelCase_ : list ):
_enforce_args(lowerCAmelCase_ , lowerCAmelCase_ )
if n == 0:
return 0
__lowercase : List[str] = float("""-inf""" )
for i in range(1 , n + 1 ):
__lowercase : Optional[int] = max(
lowerCAmelCase_ , prices[i - 1] + naive_cut_rod_recursive(n - i , lowerCAmelCase_ ) )
return max_revue
def snake_case_ ( lowerCAmelCase_ : int , lowerCAmelCase_ : list ):
_enforce_args(lowerCAmelCase_ , lowerCAmelCase_ )
__lowercase : str = [float("""-inf""" ) for _ in range(n + 1 )]
return _top_down_cut_rod_recursive(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def snake_case_ ( lowerCAmelCase_ : int , lowerCAmelCase_ : list , lowerCAmelCase_ : list ):
if max_rev[n] >= 0:
return max_rev[n]
elif n == 0:
return 0
else:
__lowercase : Any = float("""-inf""" )
for i in range(1 , n + 1 ):
__lowercase : str = max(
lowerCAmelCase_ , prices[i - 1] + _top_down_cut_rod_recursive(n - i , lowerCAmelCase_ , lowerCAmelCase_ ) , )
__lowercase : Any = max_revenue
return max_rev[n]
def snake_case_ ( lowerCAmelCase_ : int , lowerCAmelCase_ : list ):
_enforce_args(lowerCAmelCase_ , lowerCAmelCase_ )
# length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
# length 0.
__lowercase : Union[str, Any] = [float("""-inf""" ) for _ in range(n + 1 )]
__lowercase : Optional[Any] = 0
for i in range(1 , n + 1 ):
__lowercase : List[str] = max_rev[i]
for j in range(1 , i + 1 ):
__lowercase : Tuple = max(lowerCAmelCase_ , prices[j - 1] + max_rev[i - j] )
__lowercase : Optional[Any] = max_revenue_i
return max_rev[n]
def snake_case_ ( lowerCAmelCase_ : int , lowerCAmelCase_ : list ):
if n < 0:
__lowercase : Dict = F"n must be greater than or equal to 0. Got n = {n}"
raise ValueError(lowerCAmelCase_ )
if n > len(lowerCAmelCase_ ):
__lowercase : Optional[int] = (
"""Each integral piece of rod must have a corresponding price. """
F"Got n = {n} but length of prices = {len(lowerCAmelCase_ )}"
)
raise ValueError(lowerCAmelCase_ )
def snake_case_ ( ):
__lowercase : Dict = [6, 10, 12, 15, 20, 23]
__lowercase : Union[str, Any] = len(lowerCAmelCase_ )
# the best revenue comes from cutting the rod into 6 pieces, each
# of length 1 resulting in a revenue of 6 * 6 = 36.
__lowercase : str = 36
__lowercase : str = top_down_cut_rod(lowerCAmelCase_ , lowerCAmelCase_ )
__lowercase : int = bottom_up_cut_rod(lowerCAmelCase_ , lowerCAmelCase_ )
__lowercase : Dict = naive_cut_rod_recursive(lowerCAmelCase_ , lowerCAmelCase_ )
assert expected_max_revenue == max_rev_top_down
assert max_rev_top_down == max_rev_bottom_up
assert max_rev_bottom_up == max_rev_naive
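# Readable reference version of the bottom-up dynamic program above (a minimal
# sketch with descriptive names; `prices[i - 1]` is the price of a piece of length `i`):
def _bottom_up_cut_rod_reference(n: int, prices: list) -> int:
    max_rev = [0] * (n + 1)  # max_rev[length] = best revenue for a rod of that length
    for length in range(1, n + 1):
        best = float("-inf")
        for first_cut in range(1, length + 1):
            best = max(best, prices[first_cut - 1] + max_rev[length - first_cut])
        max_rev[length] = best
    return max_rev[n]


assert _bottom_up_cut_rod_reference(6, [6, 10, 12, 15, 20, 23]) == 36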
if __name__ == "__main__":
main()
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def snake_case_ ( lowerCAmelCase_ : bool = True , *lowerCAmelCase_ : int , **lowerCAmelCase_ : List[str] ):
if not is_tqdm_available():
raise ImportError("""Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.""" )
__lowercase : List[str] = False
if main_process_only:
        __lowercase : Optional[int] = PartialState().local_process_index != 0
return _tqdm(*lowerCAmelCase_ , **lowerCAmelCase_ , disable=lowerCAmelCase_ )
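# Usage sketch, following the signature above where the first positional argument is
# `main_process_only`: only the local main process renders the bar, keeping multi-GPU
# logs readable. Assuming this mirrors the public `accelerate.utils` wrapper:
#
#     from accelerate.utils import tqdm
#
#     for batch in tqdm(True, dataloader):
#         ...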
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
lowerCamelCase : Optional[int] = HUGGINGFACE_HUB_CACHE
lowerCamelCase : Tuple = '''config.json'''
lowerCamelCase : List[Any] = '''diffusion_pytorch_model.bin'''
lowerCamelCase : Union[str, Any] = '''diffusion_flax_model.msgpack'''
lowerCamelCase : Union[str, Any] = '''model.onnx'''
lowerCamelCase : List[Any] = '''diffusion_pytorch_model.safetensors'''
lowerCamelCase : Union[str, Any] = '''weights.pb'''
lowerCamelCase : Dict = '''https://huggingface.co'''
lowerCamelCase : Dict = default_cache_path
lowerCamelCase : Tuple = '''diffusers_modules'''
lowerCamelCase : Any = os.getenv('''HF_MODULES_CACHE''', os.path.join(hf_cache_home, '''modules'''))
lowerCamelCase : str = ['''fp16''', '''non-ema''']
lowerCamelCase : Optional[int] = '''.self_attn'''
from __future__ import annotations
def snake_case_ ( lowerCAmelCase_ : list[int] ):
if not nums:
return 0
__lowercase : Tuple = nums[0]
__lowercase : Tuple = 0
for num in nums[1:]:
__lowercase , __lowercase : List[str] = (
max_excluding + num,
max(lowerCAmelCase_ , lowerCAmelCase_ ),
)
return max(lowerCAmelCase_ , lowerCAmelCase_ )
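# Readable reference version of the loop above (a minimal sketch): at each element we
# either extend the best sum that excluded the previous element, or carry forward the
# better of the two running totals.
def _max_non_adjacent_sum_reference(nums: list) -> int:
    if not nums:
        return 0
    including, excluding = nums[0], 0
    for num in nums[1:]:
        including, excluding = excluding + num, max(including, excluding)
    return max(including, excluding)


assert _max_non_adjacent_sum_reference([1, 2, 3]) == 4  # picks 1 and 3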
if __name__ == "__main__":
import doctest
doctest.testmod()
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
lowerCamelCase : Any = logging.get_logger(__name__)
class lowerCAmelCase ( __a ):
'''simple docstring'''
_A : int = ['''pixel_values''']
def __init__( self : Tuple , __a : bool = True , __a : Dict[str, int] = None , __a : PILImageResampling = PILImageResampling.BILINEAR , __a : bool = True , __a : Union[int, float] = 1 / 255 , __a : bool = True , __a : Dict[str, int] = None , __a : bool = True , **__a : Optional[int] , ) -> None:
"""simple docstring"""
super().__init__(**__a )
__lowercase : List[str] = size if size is not None else {"""shortest_edge""": 224}
__lowercase : Dict = get_size_dict(__a , default_to_square=__a )
__lowercase : Any = crop_size if crop_size is not None else {"""height""": 256, """width""": 256}
__lowercase : str = get_size_dict(__a , param_name="""crop_size""" )
__lowercase : Optional[Any] = do_resize
__lowercase : int = size
__lowercase : int = resample
__lowercase : Union[str, Any] = do_rescale
__lowercase : List[Any] = rescale_factor
__lowercase : List[str] = do_center_crop
__lowercase : Optional[int] = crop_size
__lowercase : List[Any] = do_flip_channel_order
def lowerCAmelCase ( self : Union[str, Any] , __a : np.ndarray , __a : Dict[str, int] , __a : PILImageResampling = PIL.Image.BILINEAR , __a : Optional[Union[str, ChannelDimension]] = None , **__a : int , ) -> np.ndarray:
"""simple docstring"""
__lowercase : Optional[Any] = get_size_dict(__a , default_to_square=__a )
if "shortest_edge" not in size:
raise ValueError(F"The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}" )
__lowercase : List[Any] = get_resize_output_image_size(__a , size=size["""shortest_edge"""] , default_to_square=__a )
return resize(__a , size=__a , resample=__a , data_format=__a , **__a )
def lowerCAmelCase ( self : Optional[Any] , __a : np.ndarray , __a : Dict[str, int] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Dict , ) -> np.ndarray:
"""simple docstring"""
__lowercase : Optional[int] = get_size_dict(__a )
if "height" not in size or "width" not in size:
raise ValueError(F"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}" )
return center_crop(__a , size=(size["""height"""], size["""width"""]) , data_format=__a , **__a )
def lowerCAmelCase ( self : Optional[Any] , __a : np.ndarray , __a : Union[int, float] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Any , ) -> Union[str, Any]:
"""simple docstring"""
return rescale(__a , scale=__a , data_format=__a , **__a )
def lowerCAmelCase ( self : Optional[Any] , __a : np.ndarray , __a : Optional[Union[str, ChannelDimension]] = None ) -> np.ndarray:
"""simple docstring"""
return flip_channel_order(__a , data_format=__a )
def lowerCAmelCase ( self : Any , __a : ImageInput , __a : bool = None , __a : Dict[str, int] = None , __a : PILImageResampling = None , __a : bool = None , __a : float = None , __a : bool = None , __a : Dict[str, int] = None , __a : bool = None , __a : Optional[Union[str, TensorType]] = None , __a : ChannelDimension = ChannelDimension.FIRST , **__a : str , ) -> PIL.Image.Image:
"""simple docstring"""
__lowercase : Optional[int] = do_resize if do_resize is not None else self.do_resize
__lowercase : str = resample if resample is not None else self.resample
__lowercase : int = do_rescale if do_rescale is not None else self.do_rescale
__lowercase : int = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowercase : Union[str, Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
__lowercase : Optional[int] = (
do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
)
__lowercase : Optional[Any] = size if size is not None else self.size
__lowercase : List[Any] = get_size_dict(__a , default_to_square=__a )
__lowercase : List[str] = crop_size if crop_size is not None else self.crop_size
__lowercase : List[str] = get_size_dict(__a , param_name="""crop_size""" )
__lowercase : Union[str, Any] = make_list_of_images(__a )
if not valid_images(__a ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
# All transformations expect numpy arrays.
__lowercase : Any = [to_numpy_array(__a ) for image in images]
if do_resize:
__lowercase : Any = [self.resize(image=__a , size=__a , resample=__a ) for image in images]
if do_center_crop:
__lowercase : Optional[Any] = [self.center_crop(image=__a , size=__a ) for image in images]
if do_rescale:
__lowercase : str = [self.rescale(image=__a , scale=__a ) for image in images]
# the pretrained checkpoints assume images are BGR, not RGB
if do_flip_channel_order:
__lowercase : Any = [self.flip_channel_order(image=__a ) for image in images]
__lowercase : List[str] = [to_channel_dimension_format(__a , __a ) for image in images]
__lowercase : Tuple = {"""pixel_values""": images}
return BatchFeature(data=__a , tensor_type=__a )
def lowerCAmelCase ( self : Any , __a : str , __a : List[Tuple] = None ) -> Dict:
"""simple docstring"""
__lowercase : str = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(__a ) != len(__a ):
raise ValueError(
"""Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
if is_torch_tensor(__a ):
__lowercase : List[Any] = target_sizes.numpy()
__lowercase : Dict = []
for idx in range(len(__a ) ):
__lowercase : Optional[int] = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=__a )
__lowercase : Union[str, Any] = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(__a )
else:
__lowercase : Dict = logits.argmax(dim=1 )
__lowercase : Any = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
lowerCamelCase : List[str] = '''0.18.2'''
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
        UNetaDModel,
        UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
def snake_case_ ( lowerCAmelCase_ : int , lowerCAmelCase_ : int ):
return int((input_a, input_a).count(0 ) == 0 )
def snake_case_ ( ):
assert and_gate(0 , 0 ) == 0
assert and_gate(0 , 1 ) == 0
assert and_gate(1 , 0 ) == 0
assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    prefix = "backbone." if is_semantic else ""

    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"{prefix}blocks.{i}.norm1.weight", F"beit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"{prefix}blocks.{i}.norm1.bias", F"beit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"{prefix}blocks.{i}.attn.proj.weight", F"beit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(F"{prefix}blocks.{i}.attn.proj.bias", F"beit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"{prefix}blocks.{i}.norm2.weight", F"beit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"{prefix}blocks.{i}.norm2.bias", F"beit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc1.weight", F"beit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc1.bias", F"beit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc2.weight", F"beit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc2.bias", F"beit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
(F"{prefix}cls_token", """beit.embeddings.cls_token"""),
(F"{prefix}patch_embed.proj.weight", """beit.embeddings.patch_embeddings.projection.weight"""),
(F"{prefix}patch_embed.proj.bias", """beit.embeddings.patch_embeddings.projection.bias"""),
(F"{prefix}pos_embed", """beit.embeddings.position_embeddings"""),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("""mask_token""", """beit.embeddings.mask_token"""),
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("""fc_norm.weight""", """beit.pooler.layernorm.weight"""),
("""fc_norm.bias""", """beit.pooler.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values
        in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
        q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")

        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
        state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias

        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
        gamma_2 = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")
        state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_1
        state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_2
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    # We verify our conversion on an image of cute cats.
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)

    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)

    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False
    )
    image = prepare_img()

    encoding = image_processor(images=image, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    outputs = model(pixel_values)
    logits = outputs.logits

    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
    args = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
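# Example invocation (the script filename is hypothetical; the URL is the
# argparse default above, so it can also be omitted):
#
#     python convert_dit_to_pytorch.py \
#         --checkpoint_url https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth \
#         --pytorch_dump_folder_path ./dit-base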
import collections
import inspect
import unittest
from typing import Any, Dict, List, Optional, Tuple, Union
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2", "stage3"],
        out_indices=[1, 2, 3],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels
    def get_config(self):
        return MaskFormerSwinConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            patch_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])

        # verify ValueError
        with self.parent.assertRaises(ValueError):
            config.out_features = ["stem"]
            model = MaskFormerSwinBackbone(config=config)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
_A : int = False
_A : int = False
_A : int = False
_A : Tuple = False
_A : Optional[int] = False
    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=37)
@require_torch_multi_gpu
@unittest.skip(
reason=(
"""`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"""
""" `nn.DataParallel`"""
) )
    def test_multi_gpu_data_parallel_forward(self):
        pass
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
    @unittest.skip("Swin does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    @unittest.skip("Swin does not support feedforward chunking")
    def test_feed_forward_chunking(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
@unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""" )
def lowerCAmelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
pass
@unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""" )
def lowerCAmelCase ( self : Any ) -> str:
"""simple docstring"""
pass
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
    @unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints")
    def test_model_from_pretrained(self):
        pass
    @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
    def test_save_load_fast_init_to_base(self):
        pass
    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(t):
            t[t != t] = 0
            return t

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
                dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif isinstance(tuple_object, Dict):
                    for tuple_iterable_value, dict_iterable_value in zip(
                        tuple_object.values(), dict_object.values()
                    ):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        torch.allclose(
                            set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5
                        ),
                        msg=(
                            "Tuple and dict output are not equal. Difference:"
                            f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
                            f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"
                            f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."
                        ),
                    )

            recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
@require_torch
class MaskFormerSwinBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)

    def test_backbone_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        batch_size = inputs_dict["pixel_values"].shape[0]

        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()

            outputs = backbone(**inputs_dict)

            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps, tuple)
            self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
            for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
                self.assertTrue(feature_map.shape[:2], (batch_size, n_channels))
            self.assertIsNone(outputs.hidden_states)
            self.assertIsNone(outputs.attentions)

            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict, output_hidden_states=True)
            self.assertIsNotNone(outputs.hidden_states)
            self.assertTrue(len(outputs.hidden_states), len(backbone.stage_names))
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels), (batch_size, n_channels))

            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict, output_attentions=True)
                self.assertIsNotNone(outputs.attentions)
from torch import nn
class ClassificationHead(nn.Module):
    """A single linear layer mapping hidden states of size `embed_size` to `class_size` logits."""

    def __init__(self, class_size, embed_size):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size, class_size)

    def forward(self, hidden_state):
        logits = self.mlp(hidden_state)
        return logits
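# Minimal usage sketch (the dimensions are arbitrary):
#
#     import torch
#     head = ClassificationHead(class_size=5, embed_size=768)
#     logits = head(torch.randn(2, 768))  # -> shape (2, 5)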
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_CITATION = '''\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
'''
_DESCRIPTION = '''\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).
This metrics is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
'''
_KWARGS_DESCRIPTION = '''
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
predictions: list of generated text to score. Each predictions
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer
pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: "c" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric(\'mauve\')
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mauve(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/krishnap25/mauve""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/krishnap25/mauve"""] , reference_urls=[
"""https://arxiv.org/abs/2102.01454""",
"""https://github.com/krishnap25/mauve""",
] , )
    def _compute(
        self,
        predictions,
        references,
        p_features=None,
        q_features=None,
        p_tokens=None,
        q_tokens=None,
        num_buckets="auto",
        pca_max_data=-1,
        kmeans_explained_var=0.9,
        kmeans_num_redo=5,
        kmeans_max_iter=500,
        featurize_model_name="gpt2-large",
        device_id=-1,
        max_text_length=1024,
        divergence_curve_discretization_size=25,
        mauve_scaling_factor=5,
        verbose=True,
        seed=25,
    ):
        out = compute_mauve(
            p_text=predictions,
            q_text=references,
            p_features=p_features,
            q_features=q_features,
            p_tokens=p_tokens,
            q_tokens=q_tokens,
            num_buckets=num_buckets,
            pca_max_data=pca_max_data,
            kmeans_explained_var=kmeans_explained_var,
            kmeans_num_redo=kmeans_num_redo,
            kmeans_max_iter=kmeans_max_iter,
            featurize_model_name=featurize_model_name,
            device_id=device_id,
            max_text_length=max_text_length,
            divergence_curve_discretization_size=divergence_curve_discretization_size,
            mauve_scaling_factor=mauve_scaling_factor,
            verbose=verbose,
            seed=seed,
        )
return out
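# Minimal usage sketch (requires the `mauve-text` package; the texts are toy examples):
#
#     import datasets
#     mauve = datasets.load_metric("mauve")
#     out = mauve.compute(predictions=["hello there"], references=["hello there"])
#     print(out.mauve)  # close to 1.0 when the two text distributions match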
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
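# Example invocation via fire (the script and file names are hypothetical):
#
#     python rouge_cli.py pred_summaries.txt gold_summaries.txt --save_path metrics.json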
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 10_00))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    # Score an item by counting the characters that match the target position-wise.
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    # Slice both parents at a random point and swap the tails.
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
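# Worked example: with parents "abcdef"/"uvwxyz" and a random slice of 2,
# crossover returns ("ab" + "wxyz", "uv" + "cdef") == ("abwxyz", "uvcdef").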
def mutate(child: str, genes: list[str]) -> str:
    # With probability MUTATION_PROBABILITY, replace one random gene of the child.
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)
def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]

        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generations.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population: {total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models"
        )
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be download even if already in cache-dir"
        )
        download_parser.add_argument(
            "--trust-remote-code",
            action="store_true",
            help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine",
        )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
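# Example invocation (assuming this command is wired into the `transformers-cli`
# entry point, as in the transformers package; the model name is illustrative):
#
#     transformers-cli download bert-base-uncased --cache-dir /tmp/models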
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
    warnings = None
try:
import msvcrt
except ImportError:
    msvcrt = None
try:
import fcntl
except ImportError:
    fcntl = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
    TimeoutError = OSError
# Data
# ------------------------------------------------
__all__ = [
'''Timeout''',
'''BaseFileLock''',
'''WindowsFileLock''',
'''UnixFileLock''',
'''SoftFileLock''',
'''FileLock''',
]
__version__ = "3.0.12"
_logger = None
def logger():
    """Returns the logger instance used in this module."""
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger
class Timeout(TimeoutError):
    """Raised when the lock could not be acquired within *timeout* seconds."""

    def __init__(self, lock_file):
        self.lock_file = lock_file
        return None

    def __str__(self):
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp
class _Acquire_ReturnProxy(object):
    """Context-manager proxy returned by `acquire()`, so it can be used in a `with` block."""

    def __init__(self, lock):
        self.lock = lock
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None
class BaseFileLock:
    """Implements the base class of a file lock."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)
        # The path to the lock file.
        self._lock_file = lock_file

        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None

        # The default timeout value.
        self.timeout = timeout

        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()

        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None

    @property
    def lock_file(self):
        """The path to the lock file."""
        return self._lock_file

    @property
    def timeout(self):
        """The default timeout value."""
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None
    def _acquire(self):
        """Platform-dependent way of acquiring the lock; implemented by subclasses."""
        raise NotImplementedError()

    def _release(self):
        """Platform-dependent way of releasing the lock; implemented by subclasses."""
        raise NotImplementedError()

    @property
    def is_locked(self):
        """True if the object currently holds the file lock."""
        return self._lock_file_fd is not None
    def acquire(self, timeout=None, poll_intervall=0.05):
        if timeout is None:
            timeout = self.timeout

        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()

                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..."
                    )
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)
    def release(self, force=False):
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1

                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file

                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")
        return None
    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        self.release(force=True)
        return None
    def hash_filename_if_too_long(self, path, max_length):
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path
class WindowsFileLock(BaseFileLock):
    """Uses the msvcrt.locking() function to hard lock the lock file on Windows systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC

        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)

        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class UnixFileLock(BaseFileLock):
    """Uses the fcntl.flock() function to hard lock the lock file on Unix systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)

        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None
class SoftFileLock(BaseFileLock):
    """Simply watches the existence of the lock file."""

    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC

        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None

        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None
FileLock = None

if msvcrt:
    FileLock = WindowsFileLock

elif fcntl:
    FileLock = UnixFileLock

else:
    FileLock = SoftFileLock

    if warnings is not None:
        warnings.warn("only soft file lock is available")
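# Minimal usage sketch of the lock API defined above (the path is hypothetical):
#
#     lock = FileLock("/tmp/some_resource.txt.lock", timeout=5)
#     with lock:  # blocks until acquired, or raises Timeout after 5 seconds
#         ...     # exclusive access to the guarded resource
#
# Locks are reentrant: nested `with lock:` blocks only release the underlying
# file lock when the outermost block exits, thanks to the internal lock counter.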
import inspect
import tempfile
import unittest
from typing import Any, Dict, List, Optional, Tuple, Union
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
    def __init__(
        self,
        parent,
        d_model=16,
        batch_size=13,
        prediction_length=7,
        context_length=14,
        label_length=10,
        cardinality=19,
        embedding_dimension=5,
        num_time_features=4,
        is_training=True,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        lags_sequence=[1, 2, 3, 4, 5],
        moving_average=25,
        autocorrelation_factor=5,
    ):
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob

        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length

        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
    def get_config(self):
        return AutoformerConfig(
            d_model=self.d_model,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            prediction_length=self.prediction_length,
            context_length=self.context_length,
            label_length=self.label_length,
            lags_sequence=self.lags_sequence,
            num_time_features=self.num_time_features,
            num_static_categorical_features=1,
            cardinality=[self.cardinality],
            embedding_dimension=[self.embedding_dimension],
            moving_average=self.moving_average,
        )
    def prepare_autoformer_inputs_dict(self, config):
        _past_length = config.context_length + max(config.lags_sequence)

        static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0])
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        past_values = floats_tensor([self.batch_size, _past_length])
        past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5

        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        future_values = floats_tensor([self.batch_size, config.prediction_length])

        inputs_dict = {
            "past_values": past_values,
            "static_categorical_features": static_categorical_features,
            "past_time_features": past_time_features,
            "past_observed_mask": past_observed_mask,
            "future_time_features": future_time_features,
            "future_values": future_values,
        }
        return inputs_dict
    def prepare_config_and_inputs(self):
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = AutoformerModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = AutoformerEncoder.from_pretrained(tmpdirname).to(torch_device)

        transformer_inputs, feature, _, _, _ = model.create_network_inputs(**inputs_dict)
        seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...])

        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]),
            dim=-1,
        )
        encoder_last_hidden_state_2 = encoder(inputs_embeds=enc_input)[0]
        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...], dim=1)
            .unsqueeze(1)
            .repeat(1, config.prediction_length, 1)
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]],
            device=enc_input.device,
        )

        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = AutoformerDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            trend=trend_init,
            inputs_embeds=dec_input,
            encoder_hidden_states=encoder_last_hidden_state,
        )[0]
        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
_A : Dict = False
_A : Tuple = False
_A : Optional[int] = False
_A : Tuple = False
_A : str = False
_A : Union[str, Any] = False
    def setUp(self):
        self.model_tester = AutoformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AutoformerConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])
    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)
    @unittest.skip(reason="Model has no tokens embeddings")
    def test_resize_tokens_embeddings(self):
        pass
    def test_model_main_input_name(self):
        model_signature = inspect.signature(getattr(AutoformerModel, "forward"))
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys())[1]
        self.assertEqual(AutoformerModel.main_input_name, observed_main_input_name)
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
"""past_values""",
"""past_time_features""",
"""past_observed_mask""",
"""static_categorical_features""",
"""static_real_features""",
"""future_values""",
"""future_time_features""",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("""future_observed_mask""" )
expected_arg_names.extend(
[
"""decoder_attention_mask""",
"""head_mask""",
"""decoder_head_mask""",
"""cross_attn_head_mask""",
"""encoder_outputs""",
"""past_key_values""",
"""output_hidden_states""",
"""output_attentions""",
"""use_cache""",
"""return_dict""",
] )
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
def lowerCAmelCase ( self : int ) -> int:
"""simple docstring"""
__lowercase , __lowercase : str = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase : int = True
__lowercase : Tuple = getattr(self.model_tester , """seq_length""" , __a )
__lowercase : Union[str, Any] = getattr(self.model_tester , """decoder_seq_length""" , __a )
__lowercase : List[str] = getattr(self.model_tester , """encoder_seq_length""" , __a )
__lowercase : List[Any] = getattr(self.model_tester , """d_model""" , __a )
__lowercase : Optional[int] = getattr(self.model_tester , """num_attention_heads""" , __a )
__lowercase : Any = d_model // num_attention_heads
for model_class in self.all_model_classes:
__lowercase : Dict = True
__lowercase : List[str] = False
__lowercase : Optional[int] = True
__lowercase : str = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
__lowercase : int = model(**self._prepare_for_class(__a , __a ) )
__lowercase : Any = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
            # check that output_attentions also works using config
del inputs_dict["output_attentions"]
__lowercase : Optional[int] = True
__lowercase : List[str] = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
__lowercase : Union[str, Any] = model(**self._prepare_for_class(__a , __a ) )
__lowercase : Dict = outputs.encoder_attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
__lowercase : Tuple = len(__a )
__lowercase : str = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(__a , __a )
# decoder attentions
__lowercase : List[Any] = outputs.decoder_attentions
self.assertIsInstance(__a , (list, tuple) )
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
__lowercase : Optional[int] = outputs.cross_attentions
self.assertIsInstance(__a , (list, tuple) )
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
__lowercase : Tuple = True
__lowercase : Union[str, Any] = True
__lowercase : Tuple = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
__lowercase : Any = model(**self._prepare_for_class(__a , __a ) )
self.assertEqual(out_len + 2 , len(__a ) )
__lowercase : Optional[Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def lowerCAmelCase ( self : int ) -> Any:
"""simple docstring"""
super().test_retain_grad_hidden_states_attentions()
def prepare_batch(filename="train-batch.pt" ):
    # `torch_device` is the device helper from `transformers.testing_utils`
    file = hf_hub_download(repo_id="""hf-internal-testing/tourism-monthly-batch""" , filename=filename , repo_type="""dataset""" )
    batch = torch.load(file , map_location=torch_device )
    return batch
@require_torch
@slow
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
__lowercase : List[str] = AutoformerModel.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(__a )
__lowercase : List[Any] = prepare_batch()
with torch.no_grad():
__lowercase : Tuple = model(
past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , future_values=batch["""future_values"""] , future_time_features=batch["""future_time_features"""] , )[0]
__lowercase : List[str] = torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , __a )
__lowercase : Optional[int] = torch.tensor(
[[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] , device=__a )
self.assertTrue(torch.allclose(output[0, :3, :3] , __a , atol=__a ) )
def lowerCAmelCase ( self : str ) -> str:
"""simple docstring"""
__lowercase : int = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(__a )
__lowercase : List[str] = prepare_batch("""val-batch.pt""" )
with torch.no_grad():
__lowercase : Optional[Any] = model(
past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , ).encoder_last_hidden_state
__lowercase : List[str] = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , __a )
__lowercase : Optional[int] = torch.tensor(
[[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] , device=__a )
self.assertTrue(torch.allclose(output[0, :3, :3] , __a , atol=__a ) )
def lowerCAmelCase ( self : Dict ) -> List[Any]:
"""simple docstring"""
__lowercase : Optional[Any] = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(__a )
__lowercase : Optional[int] = prepare_batch("""val-batch.pt""" )
with torch.no_grad():
__lowercase : int = model.generate(
static_categorical_features=batch["""static_categorical_features"""] , past_time_features=batch["""past_time_features"""] , past_values=batch["""past_values"""] , future_time_features=batch["""future_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , )
__lowercase : int = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , __a )
__lowercase : Optional[Any] = torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=__a )
__lowercase : Dict = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , __a , rtol=1E-1 ) )
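# --- Illustrative sketch (hypothetical helper, not part of the test suite) ---
# End-to-end point forecasting with the same checkpoint and batch helper the
# integration tests above use: `generate` draws `num_parallel_samples`
# trajectories, which are averaged over dim=1 into a point forecast.
def _example_autoformer_point_forecast():
    model = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" )
    batch = prepare_batch("""val-batch.pt""" )
    with torch.no_grad():
        outputs = model.generate(
            past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , future_time_features=batch["""future_time_features"""] , )
    return outputs.sequences.mean(dim=1 )  # (batch_size, prediction_length)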
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class lowerCAmelCase ( __a ):
'''simple docstring'''
_A : torch.FloatTensor
class lowerCAmelCase ( __a , __a ):
'''simple docstring'''
@register_to_config
def __init__( self : Optional[int] , __a : int = 32 , __a : int = 64 , __a : int = 20 , __a : int = 768 , __a : List[Any]=77 , __a : Tuple=4 , __a : float = 0.0 , __a : str = "silu" , __a : Optional[str] = None , __a : Optional[str] = None , __a : Optional[str] = "linear" , __a : Optional[str] = "prd" , __a : Optional[int] = None , __a : Optional[int] = None , __a : Optional[int] = None , ) -> int:
"""simple docstring"""
super().__init__()
__lowercase : int = num_attention_heads
__lowercase : Optional[int] = attention_head_dim
__lowercase : Any = num_attention_heads * attention_head_dim
__lowercase : Union[str, Any] = additional_embeddings
__lowercase : Dict = time_embed_dim or inner_dim
__lowercase : Optional[Any] = embedding_proj_dim or embedding_dim
__lowercase : int = clip_embed_dim or embedding_dim
__lowercase : int = Timesteps(__a , __a , 0 )
__lowercase : Union[str, Any] = TimestepEmbedding(__a , __a , out_dim=__a , act_fn=__a )
__lowercase : Optional[int] = nn.Linear(__a , __a )
if embedding_proj_norm_type is None:
__lowercase : List[str] = None
elif embedding_proj_norm_type == "layer":
__lowercase : Any = nn.LayerNorm(__a )
else:
raise ValueError(F"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}" )
__lowercase : Union[str, Any] = nn.Linear(__a , __a )
if encoder_hid_proj_type is None:
__lowercase : Tuple = None
elif encoder_hid_proj_type == "linear":
__lowercase : Dict = nn.Linear(__a , __a )
else:
raise ValueError(F"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}" )
__lowercase : List[str] = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , __a ) )
if added_emb_type == "prd":
__lowercase : List[Any] = nn.Parameter(torch.zeros(1 , 1 , __a ) )
elif added_emb_type is None:
__lowercase : Dict = None
else:
raise ValueError(
F"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`." )
__lowercase : Optional[Any] = nn.ModuleList(
[
BasicTransformerBlock(
__a , __a , __a , dropout=__a , activation_fn="""gelu""" , attention_bias=__a , )
for d in range(__a )
] )
if norm_in_type == "layer":
__lowercase : Union[str, Any] = nn.LayerNorm(__a )
elif norm_in_type is None:
__lowercase : Dict = None
else:
raise ValueError(F"Unsupported norm_in_type: {norm_in_type}." )
__lowercase : Any = nn.LayerNorm(__a )
__lowercase : Any = nn.Linear(__a , __a )
__lowercase : Any = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -10000.0 )
causal_attention_mask.triu_(1 )
__lowercase : Union[str, Any] = causal_attention_mask[None, ...]
self.register_buffer("""causal_attention_mask""" , __a , persistent=__a )
__lowercase : Tuple = nn.Parameter(torch.zeros(1 , __a ) )
__lowercase : str = nn.Parameter(torch.zeros(1 , __a ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def lowerCAmelCase ( self : Union[str, Any] ) -> Dict[str, AttentionProcessor]:
"""simple docstring"""
__lowercase : Dict = {}
def fn_recursive_add_processors(__a : str , __a : torch.nn.Module , __a : Dict[str, AttentionProcessor] ):
if hasattr(__a , """set_processor""" ):
__lowercase : Tuple = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F"{name}.{sub_name}" , __a , __a )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(__a , __a , __a )
return processors
def lowerCAmelCase ( self : Optional[int] , __a : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Optional[int] = len(self.attn_processors.keys() )
if isinstance(__a , __a ) and len(__a ) != count:
raise ValueError(
F"A dict of processors was passed, but the number of processors {len(__a )} does not match the"
F" number of attention layers: {count}. Please make sure to pass {count} processor classes." )
def fn_recursive_attn_processor(__a : str , __a : torch.nn.Module , __a : Dict ):
if hasattr(__a , """set_processor""" ):
if not isinstance(__a , __a ):
module.set_processor(__a )
else:
module.set_processor(processor.pop(F"{name}.processor" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F"{name}.{sub_name}" , __a , __a )
for name, module in self.named_children():
fn_recursive_attn_processor(__a , __a , __a )
def lowerCAmelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
self.set_attn_processor(AttnProcessor() )
def lowerCAmelCase ( self : int , __a : str , __a : Union[torch.Tensor, float, int] , __a : torch.FloatTensor , __a : Optional[torch.FloatTensor] = None , __a : Optional[torch.BoolTensor] = None , __a : bool = True , ) -> Tuple:
"""simple docstring"""
__lowercase : str = hidden_states.shape[0]
__lowercase : int = timestep
if not torch.is_tensor(__a ):
__lowercase : Tuple = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(__a ) and len(timesteps.shape ) == 0:
__lowercase : Optional[Any] = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
__lowercase : Any = timesteps * torch.ones(__a , dtype=timesteps.dtype , device=timesteps.device )
__lowercase : Any = self.time_proj(__a )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
__lowercase : int = timesteps_projected.to(dtype=self.dtype )
__lowercase : List[Any] = self.time_embedding(__a )
if self.embedding_proj_norm is not None:
__lowercase : Optional[Any] = self.embedding_proj_norm(__a )
__lowercase : Tuple = self.embedding_proj(__a )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
__lowercase : Dict = self.encoder_hidden_states_proj(__a )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError("""`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set""" )
__lowercase : List[Any] = self.proj_in(__a )
__lowercase : List[str] = self.positional_embedding.to(hidden_states.dtype )
__lowercase : int = []
__lowercase : Dict = 0
if encoder_hidden_states is not None:
additional_embeds.append(__a )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
__lowercase : Optional[int] = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
__lowercase : Optional[Any] = hidden_states[:, None, :]
__lowercase : Union[str, Any] = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
__lowercase : List[str] = self.prd_embedding.to(hidden_states.dtype ).expand(__a , -1 , -1 )
additional_embeds.append(__a )
__lowercase : str = torch.cat(
__a , dim=1 , )
        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
__lowercase : List[Any] = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
__lowercase : str = F.pad(
__a , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
__lowercase : str = hidden_states + positional_embeddings
if attention_mask is not None:
__lowercase : List[str] = (1 - attention_mask.to(hidden_states.dtype )) * -10000.0
__lowercase : Union[str, Any] = F.pad(__a , (0, self.additional_embeddings) , value=0.0 )
__lowercase : List[Any] = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
__lowercase : Tuple = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
__lowercase : Optional[Any] = self.norm_in(__a )
for block in self.transformer_blocks:
__lowercase : Optional[Any] = block(__a , attention_mask=__a )
__lowercase : Union[str, Any] = self.norm_out(__a )
if self.prd_embedding is not None:
__lowercase : Optional[Any] = hidden_states[:, -1]
else:
__lowercase : int = hidden_states[:, additional_embeddings_len:]
__lowercase : Optional[int] = self.proj_to_clip_embeddings(__a )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=__a )
def lowerCAmelCase ( self : List[str] , __a : Dict ) -> List[Any]:
"""simple docstring"""
__lowercase : Any = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
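# --- Illustrative usage (comments only; argument and attribute names follow the
# original, un-renamed prior-transformer API and are assumptions here) ---
#   prior = <class above>(num_attention_heads=32 , attention_head_dim=64 , num_layers=20 , embedding_dim=768 )
#   out = prior(hidden_states , timestep , proj_embedding , encoder_hidden_states=encoder_hidden_states )
#   image_embedding = out.predicted_image_embedding  # denoised CLIP image embedding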
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class lowerCAmelCase ( __a , __a ):
'''simple docstring'''
_A : str = 1
@register_to_config
def __init__( self : Optional[int] , __a : Tuple=2000 , __a : List[str]=0.1 , __a : str=20 , __a : Optional[int]=1E-3 ) -> int:
"""simple docstring"""
__lowercase : Tuple = None
__lowercase : Union[str, Any] = None
__lowercase : int = None
def lowerCAmelCase ( self : List[Any] , __a : Any , __a : Union[str, torch.device] = None ) -> str:
"""simple docstring"""
__lowercase : List[str] = torch.linspace(1 , self.config.sampling_eps , __a , device=__a )
def lowerCAmelCase ( self : Tuple , __a : List[Any] , __a : Tuple , __a : int , __a : Optional[int]=None ) -> str:
"""simple docstring"""
if self.timesteps is None:
raise ValueError(
"""`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" )
# TODO(Patrick) better comments + non-PyTorch
# postprocess model score
__lowercase : Dict = (
-0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
)
__lowercase : int = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
__lowercase : Union[str, Any] = std.flatten()
while len(std.shape ) < len(score.shape ):
__lowercase : Optional[Any] = std.unsqueeze(-1 )
__lowercase : List[Any] = -score / std
# compute
__lowercase : Dict = -1.0 / len(self.timesteps )
__lowercase : int = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
__lowercase : List[Any] = beta_t.flatten()
while len(beta_t.shape ) < len(x.shape ):
__lowercase : Union[str, Any] = beta_t.unsqueeze(-1 )
__lowercase : List[str] = -0.5 * beta_t * x
__lowercase : int = torch.sqrt(__a )
__lowercase : Union[str, Any] = drift - diffusion**2 * score
__lowercase : Optional[Any] = x + drift * dt
# add noise
__lowercase : List[str] = randn_tensor(x.shape , layout=x.layout , generator=__a , device=x.device , dtype=x.dtype )
__lowercase : str = x_mean + diffusion * math.sqrt(-dt ) * noise
return x, x_mean
def __len__( self : Tuple ) -> Optional[int]:
"""simple docstring"""
return self.config.num_train_timesteps
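# --- Illustrative sampling loop (comments only; `score_model` is a placeholder
# and the method names follow the original scheduler API) ---
#   scheduler.set_timesteps(num_inference_steps )
#   x = torch.randn(sample_shape )
#   for t in scheduler.timesteps:
#       score = score_model(x , t )
#       x , x_mean = scheduler.step_pred(score , t , x )
#   # `x_mean` (the update without the final injected noise) is usually returned.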
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
lowerCamelCase : List[str] = False
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_torch_gpu
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase ( self : int ) -> str:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__lowercase : Union[str, Any] = VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
__lowercase : int = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
__lowercase : List[Any] = torch.manual_seed(0 )
__lowercase : List[str] = pipe.dual_guided(
prompt="""first prompt""" , image=__a , text_to_image_strength=0.75 , generator=__a , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(__a )
__lowercase : Any = VersatileDiffusionPipeline.from_pretrained(__a , torch_dtype=torch.floataa )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
__lowercase : Tuple = generator.manual_seed(0 )
__lowercase : Optional[int] = pipe.dual_guided(
prompt="""first prompt""" , image=__a , text_to_image_strength=0.75 , generator=__a , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def lowerCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
__lowercase : Tuple = VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
__lowercase : int = """cyberpunk 2077"""
__lowercase : Union[str, Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
__lowercase : Tuple = torch.manual_seed(0 )
__lowercase : Dict = pipe.dual_guided(
prompt=__a , image=__a , text_to_image_strength=0.75 , generator=__a , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" , ).images
__lowercase : Optional[Any] = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
__lowercase : str = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
__lowercase : Tuple = """A painting of a squirrel eating a burger """
__lowercase : Optional[int] = torch.manual_seed(0 )
__lowercase : str = pipe.text_to_image(
prompt=__a , generator=__a , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" ).images
__lowercase : Optional[int] = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
__lowercase : List[str] = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
__lowercase : Tuple = pipe.image_variation(__a , generator=__a , output_type="""numpy""" ).images
__lowercase : List[str] = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
__lowercase : Optional[int] = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase ( __a , unittest.TestCase ):
'''simple docstring'''
_A : str = LongformerTokenizer
_A : int = True
_A : Optional[int] = LongformerTokenizerFast
_A : int = True
def lowerCAmelCase ( self : Any ) -> List[str]:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__lowercase : Union[str, Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
__lowercase : Union[str, Any] = dict(zip(__a , range(len(__a ) ) ) )
__lowercase : Any = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
__lowercase : Optional[int] = {"""unk_token""": """<unk>"""}
__lowercase : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__lowercase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__a ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(__a ) )
def lowerCAmelCase ( self : Optional[int] , **__a : Optional[Any] ) -> str:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__a )
def lowerCAmelCase ( self : Tuple , **__a : Tuple ) -> str:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__a )
def lowerCAmelCase ( self : str , __a : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = """lower newer"""
__lowercase : int = """lower newer"""
return input_text, output_text
def lowerCAmelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase : Union[str, Any] = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
__lowercase : Dict = """lower newer"""
__lowercase : Optional[Any] = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
__lowercase : str = tokenizer.tokenize(__a ) # , add_prefix_space=True)
self.assertListEqual(__a , __a )
__lowercase : int = tokens + [tokenizer.unk_token]
__lowercase : str = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , __a )
def lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : Optional[Any] = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("""Hello world!""" , add_special_tokens=__a ) , [0, 31414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode("""Hello world! cécé herlolip 418""" , add_special_tokens=__a ) , [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] , )
@slow
def lowerCAmelCase ( self : Tuple ) -> str:
"""simple docstring"""
__lowercase : Any = self.tokenizer_class.from_pretrained("""allenai/longformer-base-4096""" )
__lowercase : Optional[Any] = tokenizer.encode("""sequence builders""" , add_special_tokens=__a )
__lowercase : List[str] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=__a )
__lowercase : Optional[Any] = tokenizer.encode(
"""sequence builders""" , add_special_tokens=__a , add_prefix_space=__a )
__lowercase : Union[str, Any] = tokenizer.encode(
"""sequence builders""" , """multi-sequence build""" , add_special_tokens=__a , add_prefix_space=__a )
__lowercase : List[Any] = tokenizer.build_inputs_with_special_tokens(__a )
__lowercase : Any = tokenizer.build_inputs_with_special_tokens(__a , __a )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def lowerCAmelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
__lowercase : Optional[Any] = self.get_tokenizer()
__lowercase : Tuple = """Encode this sequence."""
__lowercase : Optional[Any] = tokenizer.byte_encoder[""" """.encode("""utf-8""" )[0]]
# Testing encoder arguments
__lowercase : Dict = tokenizer.encode(__a , add_special_tokens=__a , add_prefix_space=__a )
__lowercase : Tuple = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__a , __a )
__lowercase : List[str] = tokenizer.encode(__a , add_special_tokens=__a , add_prefix_space=__a )
__lowercase : Any = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__a , __a )
tokenizer.add_special_tokens({"""bos_token""": """<s>"""} )
__lowercase : str = tokenizer.encode(__a , add_special_tokens=__a )
__lowercase : Dict = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__a , __a )
# Testing spaces after special tokens
__lowercase : List[Any] = """<mask>"""
tokenizer.add_special_tokens(
{"""mask_token""": AddedToken(__a , lstrip=__a , rstrip=__a )} ) # mask token has a left space
__lowercase : Dict = tokenizer.convert_tokens_to_ids(__a )
__lowercase : List[str] = """Encode <mask> sequence"""
__lowercase : List[str] = """Encode <mask>sequence"""
__lowercase : Union[str, Any] = tokenizer.encode(__a )
__lowercase : Dict = encoded.index(__a )
__lowercase : List[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__a , __a )
__lowercase : int = tokenizer.encode(__a )
__lowercase : Union[str, Any] = encoded.index(__a )
__lowercase : List[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__a , __a )
def lowerCAmelCase ( self : int ) -> str:
"""simple docstring"""
pass
def lowerCAmelCase ( self : int ) -> Dict:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
__lowercase : List[str] = self.rust_tokenizer_class.from_pretrained(__a , **__a )
__lowercase : List[Any] = self.tokenizer_class.from_pretrained(__a , **__a )
__lowercase : Optional[Any] = """A, <mask> AllenNLP sentence."""
__lowercase : Union[str, Any] = tokenizer_r.encode_plus(__a , add_special_tokens=__a , return_token_type_ids=__a )
__lowercase : Optional[Any] = tokenizer_p.encode_plus(__a , add_special_tokens=__a , return_token_type_ids=__a )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
__lowercase : Dict = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
__lowercase : str = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
                # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
__a , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
__a , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
def lowerCAmelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
__lowercase : Dict = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : Optional[int] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
__lowercase : Any = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] , __a )
self.assertEqual(post_processor_state["""add_prefix_space"""] , __a )
self.assertEqual(post_processor_state["""trim_offsets"""] , __a )
def lowerCAmelCase ( self : int ) -> Tuple:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
__lowercase : List[str] = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
__lowercase : int = F"{text_of_1_token} {text_of_1_token}"
__lowercase : List[str] = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : Any = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__a ) + 1, len(__a ) + 1 + len(__a )) , )
__lowercase : str = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : Tuple = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__a ) + 1, len(__a ) + 1 + len(__a )) , )
__lowercase : Optional[int] = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : str = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__a ), len(__a ) + 1 + len(__a )) , )
__lowercase : str = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : int = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__a ), len(__a ) + 1 + len(__a )) , )
__lowercase : Any = F" {text}"
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
__lowercase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : str = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__a ) + 1, 1 + len(__a ) + 1 + len(__a )) , )
__lowercase : int = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : Dict = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__a ), 1 + len(__a ) + 1 + len(__a )) , )
__lowercase : int = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : Tuple = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__a ), 1 + len(__a ) + 1 + len(__a )) , )
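# Summary of the offset-mapping matrix exercised above for the input
# "hello hello" (len("hello") == 5):
#   - trim_offsets=True (either add_prefix_space value): the second token's span
#     starts after the separating space, i.e. (len + 1, 2 * len + 1)
#   - trim_offsets=False: the space stays inside the span, i.e. (len, 2 * len + 1)
# The commented-out case (leading-space input with both flags True) was left
# disabled in the original test as well.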
def molarity_to_normality(nfactor: int , moles: float , volume: float ):
    # normality = molarity * n-factor, with molarity = moles / volume
    return round(float(moles / volume ) * nfactor )


def moles_to_pressure(volume: float , moles: float , temperature: float ):
    # ideal gas law solved for pressure: P = nRT / V, with R = 0.0821 L*atm/(mol*K)
    return round(float((moles * 0.0_821 * temperature) / (volume) ) )


def moles_to_volume(pressure: float , moles: float , temperature: float ):
    # ideal gas law solved for volume: V = nRT / P
    return round(float((moles * 0.0_821 * temperature) / (pressure) ) )


def pressure_and_volume_to_temperature(pressure: float , moles: float , volume: float ):
    # ideal gas law solved for temperature: T = PV / (nR)
    return round(float((pressure * volume) / (0.0_821 * moles) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
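# --- Worked example (values are illustrative) ---
# Two moles of an ideal gas at 300 K in a 5 L vessel:
#   P = nRT / V = (2 * 0.0821 * 300) / 5 ≈ 9.85 atm, which the helper rounds to 10:
#
#   >>> moles_to_pressure(volume=5 , moles=2 , temperature=300 )
#   10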
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : Union[str, Any] , __a : Dict , __a : Union[str, Any]=13 , __a : Dict=7 , __a : Dict=True , __a : Dict=True , __a : Any=True , __a : List[str]=True , __a : int=99 , __a : Optional[int]=32 , __a : str=2 , __a : int=4 , __a : List[str]=37 , __a : Union[str, Any]="gelu" , __a : Union[str, Any]=0.1 , __a : Union[str, Any]=0.1 , __a : List[Any]=512 , __a : int=16 , __a : Union[str, Any]=2 , __a : Union[str, Any]=0.02 , __a : List[str]=3 , __a : Dict=4 , __a : Optional[Any]=None , ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Any = parent
__lowercase : Tuple = 13
__lowercase : Dict = 7
__lowercase : List[Any] = True
__lowercase : Tuple = True
__lowercase : List[str] = True
__lowercase : Any = True
__lowercase : Optional[int] = 99
__lowercase : str = 384
__lowercase : Optional[Any] = 2
__lowercase : Dict = 4
__lowercase : str = 37
__lowercase : Optional[int] = """gelu"""
__lowercase : int = 0.1
__lowercase : Union[str, Any] = 0.1
__lowercase : Tuple = 512
__lowercase : Tuple = 16
__lowercase : Optional[int] = 2
__lowercase : Optional[Any] = 0.02
__lowercase : Dict = 3
__lowercase : Union[str, Any] = 4
__lowercase : Tuple = 128
__lowercase : Optional[Any] = 2
__lowercase : int = 9
__lowercase : List[Any] = 1
__lowercase : Union[str, Any] = None
def lowerCAmelCase ( self : str ) -> List[Any]:
"""simple docstring"""
__lowercase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase : Optional[Any] = None
if self.use_input_mask:
__lowercase : Dict = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase : Dict = None
if self.use_token_type_ids:
__lowercase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowercase : Optional[Any] = None
__lowercase : str = None
__lowercase : Tuple = None
if self.use_labels:
__lowercase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase : str = ids_tensor([self.batch_size] , self.num_choices )
__lowercase : Optional[int] = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__a , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase ( self : Dict , __a : List[Any] , __a : List[str] , __a : Union[str, Any] , __a : str , __a : Union[str, Any] , __a : Tuple , __a : Tuple ) -> Dict:
"""simple docstring"""
__lowercase : Dict = TFConvBertModel(config=__a )
__lowercase : Tuple = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
__lowercase : Any = [input_ids, input_mask]
__lowercase : Dict = model(__a )
__lowercase : str = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase ( self : Tuple , __a : Union[str, Any] , __a : Any , __a : Tuple , __a : Union[str, Any] , __a : str , __a : Dict , __a : str ) -> Dict:
"""simple docstring"""
__lowercase : Optional[int] = TFConvBertForMaskedLM(config=__a )
__lowercase : List[Any] = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
__lowercase : Any = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase ( self : Optional[int] , __a : int , __a : Any , __a : Optional[int] , __a : int , __a : int , __a : List[Any] , __a : Optional[int] ) -> List[Any]:
"""simple docstring"""
__lowercase : str = self.num_labels
__lowercase : List[Any] = TFConvBertForSequenceClassification(config=__a )
__lowercase : int = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
__lowercase : List[str] = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase ( self : Optional[int] , __a : Any , __a : Optional[Any] , __a : int , __a : Optional[int] , __a : Tuple , __a : int , __a : int ) -> Dict:
"""simple docstring"""
__lowercase : Tuple = self.num_choices
__lowercase : Dict = TFConvBertForMultipleChoice(config=__a )
__lowercase : List[str] = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
__lowercase : int = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
__lowercase : str = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
__lowercase : str = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
__lowercase : Dict = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCAmelCase ( self : List[str] , __a : str , __a : List[str] , __a : List[str] , __a : List[str] , __a : Any , __a : Tuple , __a : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase : Tuple = self.num_labels
__lowercase : Tuple = TFConvBertForTokenClassification(config=__a )
__lowercase : Dict = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
__lowercase : str = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase ( self : List[Any] , __a : Optional[int] , __a : List[str] , __a : Optional[Any] , __a : int , __a : Tuple , __a : Any , __a : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : Any = TFConvBertForQuestionAnswering(config=__a )
__lowercase : str = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
__lowercase : List[Any] = model(__a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase ( self : Tuple ) -> Tuple:
"""simple docstring"""
__lowercase : Tuple = self.prepare_config_and_inputs()
        __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase : int = config_and_inputs
__lowercase : Union[str, Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class lowerCAmelCase ( __a , __a , unittest.TestCase ):
'''simple docstring'''
_A : Dict = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
_A : str = (
{
'''feature-extraction''': TFConvBertModel,
'''fill-mask''': TFConvBertForMaskedLM,
'''question-answering''': TFConvBertForQuestionAnswering,
'''text-classification''': TFConvBertForSequenceClassification,
'''token-classification''': TFConvBertForTokenClassification,
'''zero-shot''': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
_A : Union[str, Any] = False
_A : List[str] = False
_A : Dict = False
def lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
__lowercase : int = TFConvBertModelTester(self )
__lowercase : Tuple = ConfigTester(self , config_class=__a , hidden_size=37 )
def lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def lowerCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__a )
def lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__a )
def lowerCAmelCase ( self : str ) -> Any:
"""simple docstring"""
__lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__a )
def lowerCAmelCase ( self : str ) -> str:
"""simple docstring"""
__lowercase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__a )
def lowerCAmelCase ( self : str ) -> Any:
"""simple docstring"""
__lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__a )
@slow
def lowerCAmelCase ( self : str ) -> Any:
"""simple docstring"""
__lowercase , __lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase : Union[str, Any] = True
__lowercase : List[Any] = True
if hasattr(__a , """use_cache""" ):
__lowercase : Optional[Any] = True
__lowercase : List[str] = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
__lowercase : int = getattr(self.model_tester , """key_length""" , __a )
for model_class in self.all_model_classes:
__lowercase : Optional[Any] = self._prepare_for_class(__a , __a )
__lowercase : Tuple = model_class(__a )
__lowercase : Tuple = len(model(__a ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__a , saved_model=__a )
__lowercase : List[Any] = os.path.join(__a , """saved_model""" , """1""" )
__lowercase : str = tf.keras.models.load_model(__a )
__lowercase : Optional[int] = model(__a )
if self.is_encoder_decoder:
__lowercase : Union[str, Any] = outputs["""encoder_hidden_states"""]
__lowercase : Union[str, Any] = outputs["""encoder_attentions"""]
else:
__lowercase : Union[str, Any] = outputs["""hidden_states"""]
__lowercase : List[str] = outputs["""attentions"""]
self.assertEqual(len(__a ) , __a )
__lowercase : List[Any] = getattr(
self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(__a ) , __a )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : str = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" )
self.assertIsNotNone(__a )
def lowerCAmelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
__lowercase , __lowercase : int = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase : List[str] = True
__lowercase : List[Any] = getattr(self.model_tester , """decoder_seq_length""" , self.model_tester.seq_length )
__lowercase : Optional[int] = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
__lowercase : List[str] = getattr(self.model_tester , """key_length""" , __a )
__lowercase : List[Any] = getattr(self.model_tester , """key_length""" , __a )
def check_decoder_attentions_output(__a : List[str] ):
__lowercase : Union[str, Any] = len(__a )
self.assertEqual(out_len % 2 , 0 )
__lowercase : Any = outputs.decoder_attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(__a : str ):
__lowercase : str = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
__lowercase : int = True
__lowercase : Any = False
__lowercase : List[Any] = model_class(__a )
__lowercase : Tuple = model(self._prepare_for_class(__a , __a ) )
__lowercase : Dict = len(__a )
self.assertEqual(config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
if self.is_encoder_decoder:
__lowercase : Any = model_class(__a )
__lowercase : List[str] = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(config.output_hidden_states , __a )
check_decoder_attentions_output(__a )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
__lowercase : Dict = True
__lowercase : Optional[Any] = model_class(__a )
__lowercase : Optional[int] = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
# Check attention is always last and order is fine
__lowercase : List[str] = True
__lowercase : List[Any] = True
__lowercase : Any = model_class(__a )
__lowercase : Optional[int] = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__a ) )
self.assertEqual(model.config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
@require_tf
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : List[str] = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" )
__lowercase : str = tf.constant([[0, 1, 2, 3, 4, 5]] )
__lowercase : Tuple = model(__a )[0]
__lowercase : Any = [1, 6, 768]
self.assertEqual(output.shape , __a )
__lowercase : Optional[Any] = tf.constant(
[
[
[-0.03475493, -0.4686034, -0.30638832],
[0.22637248, -0.26988646, -0.7423424],
[0.10324868, -0.45013508, -0.58280784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __a , atol=1E-4 )
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path , bert_config_file , pytorch_dump_path ):
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file )
    print(F"Building PyTorch model from configuration: {config}" )
    model = BertForPreTraining(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(F"Save PyTorch model to {pytorch_dump_path}" )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
lowerCamelCase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowerCamelCase : Union[str, Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
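# Example invocation (script and file names are placeholders):
#
#   python convert_bert_tf_checkpoint.py \
#       --tf_checkpoint_path ./bert_model.ckpt \
#       --bert_config_file ./bert_config.json \
#       --pytorch_dump_path ./pytorch_model.bin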
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
class lowerCAmelCase ( __a ):
'''simple docstring'''
def __init__( self : int , *__a : Dict , **__a : Optional[Any] ) -> None:
"""simple docstring"""
        warnings.warn(
            """The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use BeitImageProcessor instead.""" , FutureWarning , )
super().__init__(*__a , **__a )
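# Note: instantiating the deprecated class above still works and merely emits a
# FutureWarning; new code should construct the replacement directly, e.g.:
#
#   from transformers import BeitImageProcessor
#   image_processor = BeitImageProcessor()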
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def apply_forward_hook(method ):
    # no-op unless accelerate >= 0.17.0 is installed
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__ ).base_version
    if version.parse(accelerate_version ) < version.parse("""0.17.0""" ):
        return method

    def wrapper(self , *args , **kwargs ):
        # an attached accelerate offload hook loads the weights before `forward` runs
        if hasattr(self , """_hf_hook""" ) and hasattr(self._hf_hook , """pre_forward""" ):
            self._hf_hook.pre_forward(self )
        return method(self , *args , **kwargs )

    return wrapper
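# --- Illustrative usage (the module class is a placeholder) ---
# Decorating a module's `forward` lets an attached accelerate offload hook
# (`_hf_hook`) move the weights onto the execution device before the call:
#
#   class MyOffloadedModule(torch.nn.Module):
#       @apply_forward_hook
#       def forward(self , x ):
#           return x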
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
__lowercase : Optional[int] = """| <pad> <unk> <s> </s> a b c d e f g h i j k""".split()
__lowercase : List[str] = dict(zip(__a , range(len(__a ) ) ) )
__lowercase : Dict = {
"""unk_token""": """<unk>""",
"""bos_token""": """<s>""",
"""eos_token""": """</s>""",
}
__lowercase : List[str] = {
"""feature_size""": 1,
"""padding_value""": 0.0,
"""sampling_rate""": 16000,
"""return_attention_mask""": False,
"""do_normalize""": True,
}
__lowercase : Tuple = tempfile.mkdtemp()
__lowercase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__lowercase : str = os.path.join(self.tmpdirname , __a )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__a ) + """\n""" )
with open(self.feature_extraction_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__a ) + """\n""" )
# load decoder from hub
__lowercase : Optional[int] = """hf-internal-testing/ngram-beam-search-decoder"""
def lowerCAmelCase ( self : Optional[Any] , **__a : Dict ) -> Tuple:
"""simple docstring"""
__lowercase : Union[str, Any] = self.add_kwargs_tokens_map.copy()
kwargs.update(__a )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **__a )
def lowerCAmelCase ( self : str , **__a : int ) -> Tuple:
"""simple docstring"""
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **__a )
def lowerCAmelCase ( self : Union[str, Any] , **__a : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **__a )
def lowerCAmelCase ( self : int ) -> Tuple:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def lowerCAmelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
__lowercase : Optional[Any] = self.get_tokenizer()
__lowercase : Any = self.get_feature_extractor()
__lowercase : str = self.get_decoder()
__lowercase : Tuple = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
processor.save_pretrained(self.tmpdirname )
__lowercase : Tuple = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , __a )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __a )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , __a )
def lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Any = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
__lowercase : str = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def lowerCAmelCase ( self : Any ) -> List[str]:
"""simple docstring"""
__lowercase : List[str] = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(["""xx"""] )
with self.assertRaisesRegex(__a , """include""" ):
WavaVecaProcessorWithLM(
tokenizer=__a , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def lowerCAmelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__lowercase : List[Any] = self.get_feature_extractor()
__lowercase : Union[str, Any] = self.get_tokenizer()
__lowercase : int = self.get_decoder()
__lowercase : int = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
__lowercase : Optional[int] = floats_list((3, 1000) )
__lowercase : List[Any] = feature_extractor(__a , return_tensors="""np""" )
__lowercase : List[str] = processor(__a , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase : List[Any] = self.get_feature_extractor()
__lowercase : int = self.get_tokenizer()
__lowercase : Dict = self.get_decoder()
__lowercase : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
__lowercase : Dict = """This is a test string"""
__lowercase : Any = processor(text=__a )
__lowercase : Dict = tokenizer(__a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCAmelCase ( self : str , __a : Tuple=(2, 10, 16) , __a : int=77 ) -> Optional[Any]:
"""simple docstring"""
np.random.seed(__a )
return np.random.rand(*__a )
def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : List[str] = self.get_feature_extractor()
__lowercase : Dict = self.get_tokenizer()
__lowercase : str = self.get_decoder()
__lowercase : int = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
__lowercase : List[str] = self._get_dummy_logits(shape=(10, 16) , seed=13 )
__lowercase : Optional[Any] = processor.decode(__a )
__lowercase : Any = decoder.decode_beams(__a )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual("""</s> <s> </s>""" , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ["""fork"""], ["""spawn"""]] )
def lowerCAmelCase ( self : List[str] , __a : Dict ) -> List[Any]:
"""simple docstring"""
__lowercase : str = self.get_feature_extractor()
__lowercase : Dict = self.get_tokenizer()
__lowercase : Optional[int] = self.get_decoder()
__lowercase : Any = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
__lowercase : Optional[Any] = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
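        # (added note) with the "fork" start method the workers inherit the decoder
        # already loaded in the parent process's memory; "spawn" workers start from a
        # fresh interpreter, so nothing loaded here would be inherited by them.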
if pool_context is None:
__lowercase : Union[str, Any] = processor.batch_decode(__a )
else:
with get_context(__a ).Pool() as pool:
__lowercase : Optional[Any] = processor.batch_decode(__a , __a )
__lowercase : Union[str, Any] = list(__a )
with get_context("""fork""" ).Pool() as p:
__lowercase : Optional[Any] = decoder.decode_beams_batch(__a , __a )
        texts_decoder, logit_scores_decoder, lm_scores_decoder = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(__a , decoded_processor.text )
self.assertListEqual(["""<s> <s> </s>""", """<s> <s> <s>"""] , decoded_processor.text )
self.assertListEqual(__a , decoded_processor.logit_score )
self.assertListEqual(__a , decoded_processor.lm_score )
def lowerCAmelCase ( self : Any ) -> Optional[int]:
"""simple docstring"""
__lowercase : int = self.get_feature_extractor()
__lowercase : Dict = self.get_tokenizer()
__lowercase : List[str] = self.get_decoder()
__lowercase : int = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
__lowercase : Dict = self._get_dummy_logits()
__lowercase : Tuple = 15
__lowercase : Tuple = -20.0
__lowercase : Dict = -4.0
__lowercase : Dict = processor.batch_decode(
__a , beam_width=__a , beam_prune_logp=__a , token_min_logp=__a , )
__lowercase : Tuple = decoded_processor_out.text
__lowercase : List[Any] = list(__a )
with get_context("""fork""" ).Pool() as pool:
__lowercase : Any = decoder.decode_beams_batch(
__a , __a , beam_width=__a , beam_prune_logp=__a , token_min_logp=__a , )
__lowercase : Optional[Any] = [d[0][0] for d in decoded_decoder_out]
__lowercase : Optional[int] = [d[0][2] for d in decoded_decoder_out]
__lowercase : Optional[int] = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(__a , __a )
self.assertListEqual(["""</s> <s> <s>""", """<s> <s> <s>"""] , __a )
self.assertTrue(np.array_equal(__a , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , __a , atol=1E-3 ) )
self.assertTrue(np.array_equal(__a , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9474] , __a , atol=1E-3 ) )
def lowerCAmelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
__lowercase : str = self.get_feature_extractor()
__lowercase : List[Any] = self.get_tokenizer()
__lowercase : List[Any] = self.get_decoder()
__lowercase : Dict = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
__lowercase : List[Any] = self._get_dummy_logits()
__lowercase : Optional[int] = 2.0
__lowercase : Tuple = 5.0
__lowercase : Optional[Any] = -20.0
__lowercase : Tuple = True
__lowercase : Union[str, Any] = processor.batch_decode(
__a , alpha=__a , beta=__a , unk_score_offset=__a , lm_score_boundary=__a , )
__lowercase : Any = decoded_processor_out.text
__lowercase : List[Any] = list(__a )
decoder.reset_params(
alpha=__a , beta=__a , unk_score_offset=__a , lm_score_boundary=__a , )
with get_context("""fork""" ).Pool() as pool:
__lowercase : Tuple = decoder.decode_beams_batch(
__a , __a , )
__lowercase : int = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(__a , __a )
self.assertListEqual(["""<s> </s> <s> </s> </s>""", """</s> </s> <s> </s> </s>"""] , __a )
__lowercase : str = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , __a )
def lowerCAmelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__lowercase : Optional[Any] = processor.decoder.model_container[processor.decoder._model_key]
__lowercase : str = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
__lowercase : int = os.listdir(__a )
__lowercase : Optional[Any] = ["""alphabet.json""", """language_model"""]
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(__a , __a )
def lowerCAmelCase ( self : str ) -> Optional[int]:
"""simple docstring"""
__lowercase : List[str] = snapshot_download("""hf-internal-testing/processor_with_lm""" )
__lowercase : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained(__a )
__lowercase : Dict = processor.decoder.model_container[processor.decoder._model_key]
__lowercase : List[Any] = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
__lowercase : Dict = os.listdir(__a )
__lowercase : List[Any] = os.listdir(__a )
local_decoder_files.sort()
expected_decoder_files.sort()
        # test that both the decoder from the hub and the local files in the cache are the same
self.assertListEqual(__a , __a )
def lowerCAmelCase ( self : Tuple ) -> int:
"""simple docstring"""
__lowercase : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__lowercase : Dict = AutoProcessor.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__lowercase : Any = floats_list((3, 1000) )
__lowercase : List[str] = processor_wavaveca(__a , return_tensors="""np""" )
__lowercase : List[Any] = processor_auto(__a , return_tensors="""np""" )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
__lowercase : List[str] = self._get_dummy_logits()
__lowercase : List[str] = processor_wavaveca.batch_decode(__a )
__lowercase : Optional[int] = processor_auto.batch_decode(__a )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Any = self.get_feature_extractor()
__lowercase : Union[str, Any] = self.get_tokenizer()
__lowercase : Dict = self.get_decoder()
__lowercase : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
@staticmethod
def lowerCAmelCase ( __a : Union[str, Any] , __a : List[Any] ) -> Dict:
"""simple docstring"""
__lowercase : Any = [d[key] for d in offsets]
return retrieved_list
def lowerCAmelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
__lowercase : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__lowercase : Optional[Any] = self._get_dummy_logits()[0]
__lowercase : Dict = processor.decode(__a , output_word_offsets=__a )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(__a , __a ) )
self.assertEqual(""" """.join(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """end_offset""" ) , [1, 3, 5] )
def lowerCAmelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
__lowercase : List[str] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__lowercase : Any = self._get_dummy_logits()
__lowercase : Dict = processor.batch_decode(__a , output_word_offsets=__a )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(__a , __a ) )
self.assertListEqual(
[""" """.join(self.get_from_offsets(__a , """word""" ) ) for o in outputs["""word_offsets"""]] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """end_offset""" ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def lowerCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
import torch
__lowercase : Any = load_dataset("""common_voice""" , """en""" , split="""train""" , streaming=__a )
__lowercase : str = ds.cast_column("""audio""" , datasets.Audio(sampling_rate=16000 ) )
__lowercase : Tuple = iter(__a )
__lowercase : Union[str, Any] = next(__a )
__lowercase : int = AutoProcessor.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
__lowercase : int = WavaVecaForCTC.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
__lowercase : Union[str, Any] = processor(sample["""audio"""]["""array"""] , return_tensors="""pt""" ).input_values
with torch.no_grad():
__lowercase : List[Any] = model(__a ).logits.cpu().numpy()
__lowercase : Tuple = processor.decode(logits[0] , output_word_offsets=__a )
__lowercase : int = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
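        # (added note) each CTC logit frame covers `inputs_to_logits_ratio` raw audio
        # samples (320 for the base wav2vec2 architecture), so multiplying a frame
        # offset by ratio / sampling_rate converts it to seconds, e.g. 320 / 16000 = 0.02 s.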
__lowercase : Optional[Any] = [
{
"""start_time""": d["""start_offset"""] * time_offset,
"""end_time""": d["""end_offset"""] * time_offset,
"""word""": d["""word"""],
}
for d in output["""word_offsets"""]
]
__lowercase : str = """WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"""
# output words
self.assertEqual(""" """.join(self.get_from_offsets(__a , """word""" ) ) , __a )
self.assertEqual(""" """.join(self.get_from_offsets(__a , """word""" ) ) , output.text )
# output times
__lowercase : Tuple = torch.tensor(self.get_from_offsets(__a , """start_time""" ) )
__lowercase : Dict = torch.tensor(self.get_from_offsets(__a , """end_time""" ) )
# fmt: off
__lowercase : List[Any] = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
__lowercase : Optional[int] = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(__a , __a , atol=0.01 ) )
self.assertTrue(torch.allclose(__a , __a , atol=0.01 ) )
| 649
| 1
|
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    # Logistic function, mapping every component into (0, 1).
    return 1 / (1 + np.exp(-vector))
def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    # SiLU ("swish") activation: x * sigmoid(x).
    return vector * sigmoid(vector)
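# Illustration (added, not part of the original file; the name `sigmoid_linear_unit`
# is inferred, only `sigmoid` is pinned down by the call inside it):
#     >>> sigmoid(np.array([0.0]))
#     array([0.5])
#     >>> sigmoid_linear_unit(np.array([0.0]))
#     array([0.])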
if __name__ == "__main__":
import doctest
doctest.testmod()
| 649
|
def and_gate(input_1: int, input_2: int) -> int:
    # An AND gate outputs 1 only when neither input is 0, hence the count trick.
    return int((input_1, input_2).count(0) == 0)
def test_and_gate() -> None:
assert and_gate(0 , 0 ) == 0
assert and_gate(0 , 1 ) == 0
assert and_gate(1 , 0 ) == 0
assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
| 649
| 1
|
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class EsmModelTester:
'''simple docstring'''
def __init__( self : List[str] , __a : Union[str, Any] , __a : int=13 , __a : str=7 , __a : Dict=False , __a : List[Any]=True , __a : Dict=False , __a : str=True , __a : int=33 , __a : List[str]=32 , __a : int=5 , __a : Dict=4 , __a : str=37 , __a : Optional[Any]="gelu" , __a : Any=0.1 , __a : List[str]=0.1 , __a : List[str]=512 , __a : Dict=16 , __a : Optional[Any]=2 , __a : str=0.02 , __a : Union[str, Any]=3 , __a : Optional[Any]=4 , __a : List[Any]=None , ) -> Optional[Any]:
"""simple docstring"""
__lowercase : List[Any] = parent
__lowercase : int = batch_size
__lowercase : Tuple = seq_length
__lowercase : Dict = is_training
__lowercase : Union[str, Any] = use_input_mask
__lowercase : str = use_token_type_ids
__lowercase : Optional[Any] = use_labels
__lowercase : Union[str, Any] = vocab_size
__lowercase : Any = hidden_size
__lowercase : List[str] = num_hidden_layers
__lowercase : List[Any] = num_attention_heads
__lowercase : int = intermediate_size
__lowercase : str = hidden_act
__lowercase : str = hidden_dropout_prob
__lowercase : Dict = attention_probs_dropout_prob
__lowercase : str = max_position_embeddings
__lowercase : List[str] = type_vocab_size
__lowercase : Optional[int] = type_sequence_label_size
__lowercase : str = initializer_range
__lowercase : Optional[Any] = num_labels
__lowercase : List[Any] = num_choices
__lowercase : Dict = scope
def lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase : Dict = None
if self.use_input_mask:
__lowercase : str = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase : List[Any] = None
__lowercase : Dict = None
__lowercase : Union[str, Any] = None
if self.use_labels:
__lowercase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase : str = ids_tensor([self.batch_size] , self.num_choices )
__lowercase : str = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
return EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def lowerCAmelCase ( self : Tuple , __a : Any , __a : Tuple , __a : Optional[Any] , __a : Union[str, Any] , __a : Any , __a : List[Any] ) -> str:
"""simple docstring"""
__lowercase : Dict = EsmModel(config=__a )
model.to(__a )
model.eval()
__lowercase : Optional[int] = model(__a , attention_mask=__a )
__lowercase : str = model(__a )
__lowercase : str = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCAmelCase ( self : Tuple , __a : Tuple , __a : Dict , __a : Union[str, Any] , __a : str , __a : Union[str, Any] , __a : Union[str, Any] ) -> Any:
"""simple docstring"""
__lowercase : Tuple = EsmForMaskedLM(config=__a )
model.to(__a )
model.eval()
__lowercase : int = model(__a , attention_mask=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase ( self : str , __a : Optional[int] , __a : int , __a : str , __a : Tuple , __a : str , __a : Tuple ) -> str:
"""simple docstring"""
__lowercase : Any = self.num_labels
__lowercase : Union[str, Any] = EsmForTokenClassification(config=__a )
model.to(__a )
model.eval()
__lowercase : str = model(__a , attention_mask=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class lowerCAmelCase ( __a , __a , unittest.TestCase ):
'''simple docstring'''
_A : int = False
_A : int = (
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
_A : str = ()
_A : Optional[int] = (
{
'''feature-extraction''': EsmModel,
'''fill-mask''': EsmForMaskedLM,
'''text-classification''': EsmForSequenceClassification,
'''token-classification''': EsmForTokenClassification,
'''zero-shot''': EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
_A : List[str] = True
def lowerCAmelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
        self.model_tester = EsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)
def lowerCAmelCase ( self : Any ) -> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def lowerCAmelCase ( self : Tuple ) -> Tuple:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
def lowerCAmelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
__lowercase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__a )
def lowerCAmelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
__lowercase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__a )
@slow
def lowerCAmelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase : int = EsmModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def lowerCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
__lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()[0]
__lowercase : Optional[Any] = EsmEmbeddings(config=__a )
__lowercase : Optional[int] = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
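        # (added note) like RoBERTa, ESM offsets position ids by the padding index:
        # real tokens receive padding_idx + 1, padding_idx + 2, ... while padding
        # tokens keep padding_idx itself, which is what the expectation below encodes.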
__lowercase : List[str] = torch.as_tensor(
[
[
0 + model.padding_idx + 1,
1 + model.padding_idx + 1,
2 + model.padding_idx + 1,
model.padding_idx,
]
] )
__lowercase : List[str] = create_position_ids_from_input_ids(__a , model.padding_idx )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(__a , __a ) ) )
def lowerCAmelCase ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : Tuple = self.model_tester.prepare_config_and_inputs()[0]
__lowercase : List[Any] = EsmEmbeddings(config=__a )
__lowercase : int = torch.empty(2 , 4 , 30 )
__lowercase : Optional[int] = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
__lowercase : str = torch.as_tensor([expected_single_positions, expected_single_positions] )
__lowercase : Tuple = embeddings.create_position_ids_from_inputs_embeds(__a )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(__a , __a ) ) )
@unittest.skip("""Esm does not support embedding resizing""" )
def lowerCAmelCase ( self : int ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip("""Esm does not support embedding resizing""" )
def lowerCAmelCase ( self : Any ) -> str:
"""simple docstring"""
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCAmelCase ( self : Optional[Any] ) -> int:
"""simple docstring"""
pass
@require_torch
class lowerCAmelCase ( __a ):
'''simple docstring'''
@slow
def lowerCAmelCase ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
with torch.no_grad():
__lowercase : List[str] = EsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
model.eval()
__lowercase : str = torch.tensor([[0, 1, 2, 3, 4, 5]] )
__lowercase : List[Any] = model(__a )[0]
__lowercase : str = 33
__lowercase : List[Any] = torch.Size((1, 6, vocab_size) )
self.assertEqual(output.shape , __a )
__lowercase : List[str] = torch.tensor(
[[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1E-4 ) )
@slow
def lowerCAmelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
with torch.no_grad():
__lowercase : Any = EsmModel.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
model.eval()
__lowercase : Optional[int] = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
__lowercase : str = model(__a )[0]
# compare the actual values for a slice.
__lowercase : Optional[int] = torch.tensor(
[[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1E-4 ) )
| 649
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = '''Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be run first on your machine'''
def get_user_input():
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config
def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)
    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )
    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser
def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file
    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")
def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)
if __name__ == "__main__":
main()
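# Illustrative usage (added, not part of the original file):
#     accelerate config                                  # interactive prompts
#     accelerate config --config_file ./my_config.yaml   # explicit save location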
| 649
| 1
|
import argparse
import os
import re
PATH_TO_TRANSFORMERS = "src/transformers"
# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")
def get_indent(line):
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def snake_case_ ( lowerCAmelCase_ : str , lowerCAmelCase_ : Dict="" , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : str=None ):
__lowercase : Any = 0
__lowercase : Optional[Any] = code.split("""\n""" )
if start_prompt is not None:
while not lines[index].startswith(lowerCAmelCase_ ):
index += 1
__lowercase : Union[str, Any] = ["""\n""".join(lines[:index] )]
else:
__lowercase : Any = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
__lowercase : List[Any] = [lines[index]]
index += 1
while index < len(lowerCAmelCase_ ) and (end_prompt is None or not lines[index].startswith(lowerCAmelCase_ )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(lowerCAmelCase_ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + """ """ ):
current_block.append(lines[index] )
blocks.append("""\n""".join(lowerCAmelCase_ ) )
if index < len(lowerCAmelCase_ ) - 1:
__lowercase : List[Any] = [lines[index + 1]]
index += 1
else:
__lowercase : Tuple = []
else:
blocks.append("""\n""".join(lowerCAmelCase_ ) )
__lowercase : Optional[Any] = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(lowerCAmelCase_ ) > 0:
blocks.append("""\n""".join(lowerCAmelCase_ ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(lowerCAmelCase_ ):
blocks.append("""\n""".join(lines[index:] ) )
return blocks
def ignore_underscore(key):
    # Wraps a key function so that case and underscores are ignored when sorting.
    def _inner(x):
        return key(x).lower().replace("_", "")
    return _inner
def sort_objects(objects, key=None):
    # If no key is provided, we use a noop.
    def noop(x):
        return x
    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]
    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
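# Illustrative example (added): constants sort before classes, which sort before
# functions, each group alphabetically with case and underscores ignored, e.g.
#     sort_objects(["load_tf_weights", "FunnelModel", "FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP"])
# returns ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelModel", "load_tf_weights"].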
def sort_objects_in_import(import_statement):
    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"
    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
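# Illustrative example (added): a one-line entry such as
#     _import_structure["models.funnel"] = ["FunnelModel", "FunnelConfig"]
# falls through to the final branch above and is rewritten in place to
# ["FunnelConfig", "FunnelModel"].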
def sort_imports(file, check_only=True):
    with open(file, encoding="utf-8") as f:
        code = f.read()
    if "_import_structure" not in code:
        return
    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")
        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue
        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]
        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reorderded_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reorderded_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reorderded_blocks.append(block)
                count += 1
        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]])
    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w", encoding="utf-8") as f:
                f.write("\n".join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
    args = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 649
|
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1
    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])
    # seed value
    table[0] = [[]]  # because empty string has empty combination
    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now, push that combination to the table[i+len(word)]
                    table[i + len(word)] += new_combinations
    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()
    return table[len(target)]
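# Worked example (added): all_construct("ab", ["a", "b", "ab"]) seeds table[0] = [[]],
# then "a" extends it to table[1] = [["a"]] and "ab" to table[2] = [["ab"]]; from i = 1
# the word "b" appends ["b", "a"] to table[2], and after the final reversal the call
# returns [["ab"], ["a", "b"]].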
if __name__ == "__main__":
print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
print(
all_construct(
'''hexagonosaurus''',
['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
)
)
| 649
| 1
|
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
"""split_dict""" , [
SplitDict(),
SplitDict({"""train""": SplitInfo(name="""train""" , num_bytes=1337 , num_examples=42 , dataset_name="""my_dataset""" )} ),
SplitDict({"""train""": SplitInfo(name="""train""" , num_bytes=1337 , num_examples=42 )} ),
SplitDict({"""train""": SplitInfo()} ),
] , )
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
assert split_dict == reloaded
@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")]
)
def test_split_dict_asdict_has_dataset_name(split_info):
    # For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the "dataset_name"
    # field even if it's deprecated. This way old versions of `datasets` can still reload dataset_infos.json files
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
assert "dataset_name" in split_dict_asdict["train"]
assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 649
|
def or_gate(input_1: int, input_2: int) -> int:
    # An OR gate outputs 1 as soon as at least one input is 1.
    return int((input_1, input_2).count(1) != 0)
def test_or_gate() -> None:
assert or_gate(0 , 0 ) == 0
assert or_gate(0 , 1 ) == 1
assert or_gate(1 , 0 ) == 1
assert or_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 649
| 1
|
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaProcessor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('''1.6'''):
    _is_native_amp_available = True
from torch.cuda.amp import autocast
logger = logging.getLogger(__name__)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
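# (added note) mutable defaults such as the list used for `chars_to_ignore` below must
# go through `default_factory`: `dataclasses.field` rejects a bare mutable `default=`,
# and a shared list would leak state between dataclass instances anyway.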
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    attention_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for the attention probabilities."}
    )
    activation_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for activations inside the fully connected layer."}
    )
    hidden_dropout: Optional[float] = field(
        default=0.1,
        metadata={
            "help": "The dropout probability for all fully connected layers in the embeddings, encoder, and pooler."
        },
    )
    feat_proj_dropout: Optional[float] = field(
        default=0.1,
        metadata={"help": "The dropout probability for all 1D convolutional layers in feature extractor."},
    )
    mask_time_prob: Optional[float] = field(
        default=0.05,
        metadata={
            "help": (
                "Probability of each feature vector along the time axis to be chosen as the start of the vector "
                "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature "
                "vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."
            )
        },
    )
    layerdrop: Optional[float] = field(default=0.0, metadata={"help": "The LayerDrop probability."})
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train+validation",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_val_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of validation examples to this "
                "value if set."
            )
        },
    )
    chars_to_ignore: List[str] = list_field(
        default=[",", "?", ".", "!", "-", ";", ":", '""', "%", "'", '"', "�"],
        metadata={"help": "A list of characters to remove from the transcripts."},
    )
@dataclass
class DataCollatorCTCWithPadding:
    """Data collator that dynamically pads the received inputs and labels."""
    processor: WavaVecaProcessor
    padding: Union[bool, str] = True
    max_length: Optional[int] = None
    max_length_labels: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    pad_to_multiple_of_labels: Optional[int] = None
    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # Split inputs and labels since they have to be padded with different methods.
        input_features = [{"input_values": feature["input_values"]} for feature in features]
        label_features = [{"input_ids": feature["labels"]} for feature in features]
        batch = self.processor.pad(
            input_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        labels_batch = self.processor.pad(
            labels=label_features,
            padding=self.padding,
            max_length=self.max_length_labels,
            pad_to_multiple_of=self.pad_to_multiple_of_labels,
            return_tensors="pt",
        )
        # replace padding with -100 to ignore loss correctly
        labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)
        batch["labels"] = labels
        return batch
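# (added, illustrative) the -100 written above is the label index that the model's
# loss computation skips; e.g. an attention mask row [1, 1, 0] turns labels
# [12, 7, 0] into [12, 7, -100], so the padded position contributes nothing to the loss.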
class CTCTrainer(Trainer):
'''simple docstring'''
    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        """Perform a training step on a batch of inputs."""
        model.train()
        inputs = self._prepare_inputs(inputs)
        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)
        if self.args.n_gpu > 1:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["labels"] >= 0).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")
        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps
        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()
        return loss.detach()
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("""Training/evaluation parameters %s""" , lowerCAmelCase_ )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets:
    train_dataset = datasets.load_dataset(
        "common_voice", data_args.dataset_config_name, split=data_args.train_split_name
    )
    eval_dataset = datasets.load_dataset("common_voice", data_args.dataset_config_name, split="test")
    # Create and save tokenizer
    chars_to_ignore_regex = f"[{''.join(data_args.chars_to_ignore)}]"
    def remove_special_characters(batch):
        batch["text"] = re.sub(chars_to_ignore_regex, "", batch["sentence"]).lower() + " "
        return batch
    train_dataset = train_dataset.map(remove_special_characters, remove_columns=["sentence"])
    eval_dataset = eval_dataset.map(remove_special_characters, remove_columns=["sentence"])
    def extract_all_chars(batch):
        all_text = " ".join(batch["text"])
        vocab = list(set(all_text))
        return {"vocab": [vocab], "all_text": [all_text]}
    vocab_train = train_dataset.map(
        extract_all_chars, batched=True, batch_size=-1, keep_in_memory=True, remove_columns=train_dataset.column_names
    )
    vocab_test = eval_dataset.map(
        extract_all_chars, batched=True, batch_size=-1, keep_in_memory=True, remove_columns=eval_dataset.column_names
    )
    vocab_list = list(set(vocab_train["vocab"][0]) | set(vocab_test["vocab"][0]))
    vocab_dict = {v: k for k, v in enumerate(vocab_list)}
    vocab_dict["|"] = vocab_dict[" "]
    del vocab_dict[" "]
    vocab_dict["[UNK]"] = len(vocab_dict)
    vocab_dict["[PAD]"] = len(vocab_dict)
    with open("vocab.json", "w") as vocab_file:
        json.dump(vocab_dict, vocab_file)
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = WavaVecaCTCTokenizer(
        "vocab.json", unk_token="[UNK]", pad_token="[PAD]", word_delimiter_token="|"
    )
    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1, sampling_rate=16000, padding_value=0.0, do_normalize=True, return_attention_mask=True
    )
    processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
    model = WavaVecaForCTC.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        activation_dropout=model_args.activation_dropout,
        attention_dropout=model_args.attention_dropout,
        hidden_dropout=model_args.hidden_dropout,
        feat_proj_dropout=model_args.feat_proj_dropout,
        mask_time_prob=model_args.mask_time_prob,
        gradient_checkpointing=training_args.gradient_checkpointing,
        layerdrop=model_args.layerdrop,
        ctc_loss_reduction="mean",
        pad_token_id=processor.tokenizer.pad_token_id,
        vocab_size=len(processor.tokenizer),
    )
    if data_args.max_train_samples is not None:
        max_train_samples = min(len(train_dataset), data_args.max_train_samples)
        train_dataset = train_dataset.select(range(max_train_samples))
    if data_args.max_val_samples is not None:
        eval_dataset = eval_dataset.select(range(data_args.max_val_samples))
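    # (added note) Common Voice distributes 48 kHz audio while Wav2Vec2 checkpoints
    # expect 16 kHz input, hence the fixed 48000 -> 16000 resampler below.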
    resampler = torchaudio.transforms.Resample(48000, 16000)
    # Preprocessing the datasets.
    # We need to read the audio files as arrays and tokenize the targets.
    def speech_file_to_array_fn(batch):
        speech_array, sampling_rate = torchaudio.load(batch["path"])
        batch["speech"] = resampler(speech_array).squeeze().numpy()
        batch["sampling_rate"] = 16000
        batch["target_text"] = batch["text"]
        return batch
    train_dataset = train_dataset.map(
        speech_file_to_array_fn, remove_columns=train_dataset.column_names, num_proc=data_args.preprocessing_num_workers
    )
    eval_dataset = eval_dataset.map(
        speech_file_to_array_fn, remove_columns=eval_dataset.column_names, num_proc=data_args.preprocessing_num_workers
    )
    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        assert (
            len(set(batch["sampling_rate"])) == 1
        ), f"Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."
        processed_batch = processor(
            audio=batch["speech"], text=batch["target_text"], sampling_rate=batch["sampling_rate"][0]
        )
        batch.update(processed_batch)
        return batch
    train_dataset = train_dataset.map(
        prepare_dataset,
        remove_columns=train_dataset.column_names,
        batch_size=training_args.per_device_train_batch_size,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
    )
    eval_dataset = eval_dataset.map(
        prepare_dataset,
        remove_columns=eval_dataset.column_names,
        batch_size=training_args.per_device_train_batch_size,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
    )
    # Metric
    wer_metric = datasets.load_metric("wer")
    def compute_metrics(pred):
        pred_logits = pred.predictions
        # CTC greedy decoding: most likely token per frame; batch_decode collapses
        # repeats and strips the padding/blank token.
        pred_ids = np.argmax(pred_logits, axis=-1)
        pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id
        pred_str = processor.batch_decode(pred_ids)
        # we do not want to group tokens when computing the metrics
        label_str = processor.batch_decode(pred.label_ids, group_tokens=False)
        wer = wer_metric.compute(predictions=pred_str, references=label_str)
        return {"wer": wer}
    if model_args.freeze_feature_extractor:
        model.freeze_feature_extractor()
    # Data collator
    data_collator = DataCollatorCTCWithPadding(processor=processor, padding=True)
    # Initialize our Trainer
    trainer = CTCTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        compute_metrics=compute_metrics,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=processor.feature_extractor,
    )
    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        # Save the feature_extractor and the tokenizer
        if is_main_process(training_args.local_rank):
            processor.save_pretrained(training_args.output_dir)
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()
    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate()
        max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_val_samples, len(eval_dataset))
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
    return results
if __name__ == "__main__":
main()
| 649
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
    "convert_funnel_original_tf_checkpoint_to_pytorch": [],
    "tokenization_funnel": ["FunnelTokenizer"],
}
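# (added note) `_import_structure` only lists names; `_LazyModule` at the bottom of
# this file performs the actual imports on first attribute access, which keeps a bare
# `import transformers` cheap even with every model registered.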
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_funnel"] = [
'''FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FunnelBaseModel''',
'''FunnelForMaskedLM''',
'''FunnelForMultipleChoice''',
'''FunnelForPreTraining''',
'''FunnelForQuestionAnswering''',
'''FunnelForSequenceClassification''',
'''FunnelForTokenClassification''',
'''FunnelModel''',
'''FunnelPreTrainedModel''',
'''load_tf_weights_in_funnel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_funnel"] = [
'''TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFFunnelBaseModel''',
'''TFFunnelForMaskedLM''',
'''TFFunnelForMultipleChoice''',
'''TFFunnelForPreTraining''',
'''TFFunnelForQuestionAnswering''',
'''TFFunnelForSequenceClassification''',
'''TFFunnelForTokenClassification''',
'''TFFunnelModel''',
'''TFFunnelPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
lowerCamelCase : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
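# Lazy-import sketch: nothing above imports torch or TF eagerly; the module is
# swapped for a _LazyModule that resolves names from _import_structure on
# first attribute access (upstream this assignment targets
# sys.modules[__name__]), so `from ..funnel import FunnelModel` only pulls in
# the torch-backed code when FunnelModel is actually used.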
| 649
| 1
|
print((lambda quine: quine % quine)('''print((lambda quine: quine %% quine)(%r))'''))
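# How the quine above reproduces itself: %r re-quotes the template string and
# %% escapes the literal percent sign, so `quine % quine` evaluates to the
# program's own source.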
| 649
|
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
lowerCamelCase : Any = None
try:
import msvcrt
except ImportError:
lowerCamelCase : str = None
try:
import fcntl
except ImportError:
lowerCamelCase : Optional[Any] = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
lowerCamelCase : Union[str, Any] = OSError
# Data
# ------------------------------------------------
lowerCamelCase : Tuple = [
'''Timeout''',
'''BaseFileLock''',
'''WindowsFileLock''',
'''UnixFileLock''',
'''SoftFileLock''',
'''FileLock''',
]
lowerCamelCase : Tuple = '''3.0.12'''
lowerCamelCase : Any = None
def snake_case_ ( ):
global _logger
__lowercase : List[str] = _logger or logging.getLogger(__name__ )
return _logger
class lowerCAmelCase ( __a ):
'''simple docstring'''
def __init__( self : Any , __a : Any ) -> List[Any]:
"""simple docstring"""
__lowercase : List[str] = lock_file
return None
def __str__( self : str ) -> Any:
"""simple docstring"""
__lowercase : Any = F"The file lock '{self.lock_file}' could not be acquired."
return temp
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : List[Any] , __a : Optional[int] ) -> int:
"""simple docstring"""
__lowercase : Optional[Any] = lock
return None
def __enter__( self : Dict ) -> Dict:
"""simple docstring"""
return self.lock
def __exit__( self : Optional[int] , __a : Dict , __a : Any , __a : Tuple ) -> Optional[Any]:
"""simple docstring"""
self.lock.release()
return None
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : Tuple , __a : Any , __a : Dict=-1 , __a : Optional[Any]=None ) -> Any:
"""simple docstring"""
__lowercase : Optional[int] = max_filename_length if max_filename_length is not None else 255
# Hash the filename if it's too long
__lowercase : Dict = self.hash_filename_if_too_long(__a , __a )
# The path to the lock file.
__lowercase : Optional[Any] = lock_file
# The file descriptor for the *_lock_file* as it is returned by the
# os.open() function.
        # This file descriptor is only not None while the object currently
        # holds the lock.
__lowercase : int = None
# The default timeout value.
__lowercase : Optional[int] = timeout
# We use this lock primarily for the lock counter.
__lowercase : Optional[Any] = threading.Lock()
# The lock counter is used for implementing the nested locking
# mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released when this value drops back to 0.
__lowercase : Union[str, Any] = 0
return None
@property
def lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
return self._lock_file
@property
def lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
return self._timeout
@timeout.setter
def lowerCAmelCase ( self : Tuple , __a : Tuple ) -> Dict:
"""simple docstring"""
__lowercase : Tuple = float(__a )
return None
def lowerCAmelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
raise NotImplementedError()
def lowerCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
raise NotImplementedError()
@property
def lowerCAmelCase ( self : Dict ) -> List[Any]:
"""simple docstring"""
return self._lock_file_fd is not None
def lowerCAmelCase ( self : Any , __a : Optional[Any]=None , __a : Union[str, Any]=0.05 ) -> List[str]:
"""simple docstring"""
if timeout is None:
__lowercase : Union[str, Any] = self.timeout
# Increment the number right at the beginning.
# We can still undo it, if something fails.
with self._thread_lock:
self._lock_counter += 1
__lowercase : int = id(self )
__lowercase : Optional[Any] = self._lock_file
__lowercase : List[str] = time.time()
try:
while True:
with self._thread_lock:
if not self.is_locked:
logger().debug(F"Attempting to acquire lock {lock_id} on {lock_filename}" )
self._acquire()
if self.is_locked:
logger().debug(F"Lock {lock_id} acquired on {lock_filename}" )
break
elif timeout >= 0 and time.time() - start_time > timeout:
logger().debug(F"Timeout on acquiring lock {lock_id} on {lock_filename}" )
raise Timeout(self._lock_file )
else:
logger().debug(
F"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..." )
time.sleep(__a )
except: # noqa
# Something did go wrong, so decrement the counter.
with self._thread_lock:
__lowercase : Optional[int] = max(0 , self._lock_counter - 1 )
raise
return _Acquire_ReturnProxy(lock=self )
def lowerCAmelCase ( self : Union[str, Any] , __a : Optional[Any]=False ) -> Optional[Any]:
"""simple docstring"""
with self._thread_lock:
if self.is_locked:
self._lock_counter -= 1
if self._lock_counter == 0 or force:
__lowercase : Optional[Any] = id(self )
__lowercase : str = self._lock_file
logger().debug(F"Attempting to release lock {lock_id} on {lock_filename}" )
self._release()
__lowercase : List[str] = 0
logger().debug(F"Lock {lock_id} released on {lock_filename}" )
return None
def __enter__( self : Any ) -> Optional[Any]:
"""simple docstring"""
self.acquire()
return self
def __exit__( self : List[str] , __a : str , __a : int , __a : List[Any] ) -> Tuple:
"""simple docstring"""
self.release()
return None
def __del__( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
self.release(force=__a )
return None
def lowerCAmelCase ( self : Tuple , __a : str , __a : int ) -> str:
"""simple docstring"""
__lowercase : List[Any] = os.path.basename(__a )
if len(__a ) > max_length and max_length > 0:
__lowercase : int = os.path.dirname(__a )
__lowercase : List[str] = str(hash(__a ) )
__lowercase : Optional[Any] = filename[: max_length - len(__a ) - 8] + """...""" + hashed_filename + """.lock"""
return os.path.join(__a , __a )
else:
return path
class lowerCAmelCase ( __a ):
'''simple docstring'''
def __init__( self : Union[str, Any] , __a : List[Any] , __a : Optional[int]=-1 , __a : Tuple=None ) -> List[Any]:
"""simple docstring"""
from .file_utils import relative_to_absolute_path
super().__init__(__a , timeout=__a , max_filename_length=__a )
__lowercase : Tuple = """\\\\?\\""" + relative_to_absolute_path(self.lock_file )
def lowerCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : List[str] = os.O_RDWR | os.O_CREAT | os.O_TRUNC
try:
__lowercase : Tuple = os.open(self._lock_file , __a )
except OSError:
pass
else:
try:
msvcrt.locking(__a , msvcrt.LK_NBLCK , 1 )
except OSError:
os.close(__a )
else:
__lowercase : Union[str, Any] = fd
return None
def lowerCAmelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
__lowercase : Optional[Any] = self._lock_file_fd
__lowercase : int = None
msvcrt.locking(__a , msvcrt.LK_UNLCK , 1 )
os.close(__a )
try:
os.remove(self._lock_file )
# Probably another instance of the application
# that acquired the file lock.
except OSError:
pass
return None
class lowerCAmelCase ( __a ):
'''simple docstring'''
def __init__( self : List[str] , __a : Optional[Any] , __a : str=-1 , __a : List[str]=None ) -> Any:
"""simple docstring"""
__lowercase : Dict = os.statvfs(os.path.dirname(__a ) ).f_namemax
super().__init__(__a , timeout=__a , max_filename_length=__a )
def lowerCAmelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
__lowercase : List[Any] = os.O_RDWR | os.O_CREAT | os.O_TRUNC
__lowercase : List[str] = os.open(self._lock_file , __a )
try:
fcntl.flock(__a , fcntl.LOCK_EX | fcntl.LOCK_NB )
except OSError:
os.close(__a )
else:
__lowercase : str = fd
return None
def lowerCAmelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase : Any = self._lock_file_fd
__lowercase : List[str] = None
fcntl.flock(__a , fcntl.LOCK_UN )
os.close(__a )
return None
class lowerCAmelCase ( __a ):
'''simple docstring'''
def lowerCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Tuple = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
try:
__lowercase : Union[str, Any] = os.open(self._lock_file , __a )
except OSError:
pass
else:
__lowercase : Optional[int] = fd
return None
def lowerCAmelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
os.close(self._lock_file_fd )
__lowercase : int = None
try:
os.remove(self._lock_file )
# The file is already deleted and that's what we want.
except OSError:
pass
return None
lowerCamelCase : Optional[Any] = None
if msvcrt:
lowerCamelCase : List[Any] = WindowsFileLock
elif fcntl:
lowerCamelCase : List[Any] = UnixFileLock
else:
lowerCamelCase : Union[str, Any] = SoftFileLock
if warnings is not None:
warnings.warn('''only soft file lock is available''')
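# A minimal usage sketch for the module above (path illustrative; class names
# follow the upstream filelock API that the obfuscated definitions correspond
# to):
#
#   lock = FileLock("/tmp/example.txt.lock", timeout=5)
#   with lock:
#       with lock:  # re-entrant: the counter goes 1 -> 2 -> 1 -> 0
#           pass    # the OS-level lock is held until the outer exit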
| 649
| 1
|
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
lowerCamelCase : Optional[int] = 16
lowerCamelCase : Union[str, Any] = 32
def snake_case_ ( lowerCAmelCase_ : Any ):
return int(x / 2**20 )
class lowerCAmelCase :
'''simple docstring'''
def __enter__( self : Any ) -> Tuple:
"""simple docstring"""
gc.collect()
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero
__lowercase : Tuple = torch.cuda.memory_allocated()
return self
def __exit__( self : List[str] , *__a : Any ) -> Union[str, Any]:
"""simple docstring"""
gc.collect()
torch.cuda.empty_cache()
__lowercase : List[str] = torch.cuda.memory_allocated()
__lowercase : str = torch.cuda.max_memory_allocated()
__lowercase : Any = bamb(self.end - self.begin )
__lowercase : Dict = bamb(self.peak - self.begin )
# print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def snake_case_ ( lowerCAmelCase_ : Accelerator , lowerCAmelCase_ : int = 16 , lowerCAmelCase_ : str = "bert-base-cased" , lowerCAmelCase_ : int = 320 , lowerCAmelCase_ : int = 160 , ):
__lowercase : int = AutoTokenizer.from_pretrained(lowerCAmelCase_ )
__lowercase : Any = load_dataset(
"""glue""" , """mrpc""" , split={"""train""": F"train[:{n_train}]", """validation""": F"validation[:{n_val}]"} )
def tokenize_function(lowerCAmelCase_ : str ):
# max_length=None => use the model max length (it's actually the default)
__lowercase : Tuple = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowerCAmelCase_ , max_length=lowerCAmelCase_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
__lowercase : int = datasets.map(
lowerCAmelCase_ , batched=lowerCAmelCase_ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=lowerCAmelCase_ )
    # We also rename the 'label' column to 'labels', which is the column name
    # the transformers models expect
__lowercase : Optional[int] = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(lowerCAmelCase_ : Tuple ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(lowerCAmelCase_ , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
return tokenizer.pad(lowerCAmelCase_ , padding="""longest""" , return_tensors="""pt""" )
# Instantiate dataloaders.
__lowercase : List[str] = DataLoader(
tokenized_datasets["""train"""] , shuffle=lowerCAmelCase_ , collate_fn=lowerCAmelCase_ , batch_size=lowerCAmelCase_ )
__lowercase : Dict = DataLoader(
tokenized_datasets["""validation"""] , shuffle=lowerCAmelCase_ , collate_fn=lowerCAmelCase_ , batch_size=lowerCAmelCase_ )
return train_dataloader, eval_dataloader
def snake_case_ ( lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[Any] ):
# Initialize accelerator
__lowercase : Union[str, Any] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__lowercase : Any = config["""lr"""]
__lowercase : Union[str, Any] = int(config["""num_epochs"""] )
__lowercase : Optional[Any] = int(config["""seed"""] )
__lowercase : str = int(config["""batch_size"""] )
__lowercase : Optional[Any] = args.model_name_or_path
set_seed(lowerCAmelCase_ )
__lowercase , __lowercase : Any = get_dataloaders(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , args.n_train , args.n_val )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__lowercase : List[Any] = AutoModelForSequenceClassification.from_pretrained(lowerCAmelCase_ , return_dict=lowerCAmelCase_ )
# Instantiate optimizer
__lowercase : int = (
AdamW
if accelerator.state.deepspeed_plugin is None
or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
__lowercase : Tuple = optimizer_cls(params=model.parameters() , lr=lowerCAmelCase_ )
if accelerator.state.deepspeed_plugin is not None:
__lowercase : Tuple = accelerator.state.deepspeed_plugin.deepspeed_config[
"""gradient_accumulation_steps"""
]
else:
__lowercase : Union[str, Any] = 1
__lowercase : Optional[Any] = (len(lowerCAmelCase_ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
__lowercase : Union[str, Any] = get_linear_schedule_with_warmup(
optimizer=lowerCAmelCase_ , num_warmup_steps=0 , num_training_steps=lowerCAmelCase_ , )
else:
__lowercase : Dict = DummyScheduler(lowerCAmelCase_ , total_num_steps=lowerCAmelCase_ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase : Optional[Any] = accelerator.prepare(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# We need to keep track of how many total steps we have iterated over
__lowercase : str = 0
    # We also need to keep track of the starting epoch so files are named properly
__lowercase : Tuple = 0
# Now we train the model
__lowercase : List[Any] = {}
for epoch in range(lowerCAmelCase_ , lowerCAmelCase_ ):
with TorchTracemalloc() as tracemalloc:
model.train()
for step, batch in enumerate(lowerCAmelCase_ ):
__lowercase : str = model(**lowerCAmelCase_ )
__lowercase : Optional[int] = outputs.loss
__lowercase : Tuple = loss / gradient_accumulation_steps
accelerator.backward(lowerCAmelCase_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
# Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
accelerator.print("""Memory before entering the train : {}""".format(bamb(tracemalloc.begin ) ) )
accelerator.print("""Memory consumed at the end of the train (end-begin): {}""".format(tracemalloc.used ) )
accelerator.print("""Peak Memory consumed during the train (max-begin): {}""".format(tracemalloc.peaked ) )
accelerator.print(
"""Total Peak Memory consumed during the train (max): {}""".format(
tracemalloc.peaked + bamb(tracemalloc.begin ) ) )
__lowercase : int = tracemalloc.peaked + bamb(tracemalloc.begin )
if args.peak_memory_upper_bound is not None:
assert (
train_total_peak_memory[F"epoch-{epoch}"] <= args.peak_memory_upper_bound
), "Peak memory usage exceeded the upper bound"
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , """peak_memory_utilization.json""" ) , """w""" ) as f:
json.dump(lowerCAmelCase_ , lowerCAmelCase_ )
def snake_case_ ( ):
__lowercase : Dict = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""" )
parser.add_argument(
"""--model_name_or_path""" , type=lowerCAmelCase_ , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=lowerCAmelCase_ , )
parser.add_argument(
"""--output_dir""" , type=lowerCAmelCase_ , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
parser.add_argument(
"""--peak_memory_upper_bound""" , type=lowerCAmelCase_ , default=lowerCAmelCase_ , help="""The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.""" , )
parser.add_argument(
"""--n_train""" , type=lowerCAmelCase_ , default=320 , help="""Number of training examples to use.""" , )
parser.add_argument(
"""--n_val""" , type=lowerCAmelCase_ , default=160 , help="""Number of validation examples to use.""" , )
parser.add_argument(
"""--num_epochs""" , type=lowerCAmelCase_ , default=1 , help="""Number of train epochs.""" , )
__lowercase : List[Any] = parser.parse_args()
__lowercase : List[str] = {"""lr""": 2e-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16}
training_function(lowerCAmelCase_ , lowerCAmelCase_ )
if __name__ == "__main__":
main()
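# A hedged invocation sketch (flag names come from the parser above; the
# launcher command itself is an assumption about the surrounding Accelerate
# setup):
#
#   accelerate launch this_script.py --model_name_or_path bert-base-cased \
#       --n_train 320 --n_val 160 --num_epochs 1 --output_dir .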
| 649
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
lowerCamelCase : Optional[int] = logging.get_logger(__name__)
lowerCamelCase : Tuple = {
'''microsoft/layoutlmv3-base''': '''https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json''',
}
class lowerCAmelCase ( __a ):
'''simple docstring'''
_A : int = '''layoutlmv3'''
def __init__( self : Dict , __a : List[str]=50265 , __a : str=768 , __a : List[Any]=12 , __a : List[Any]=12 , __a : List[str]=3072 , __a : Optional[Any]="gelu" , __a : Optional[int]=0.1 , __a : List[Any]=0.1 , __a : Tuple=512 , __a : int=2 , __a : Any=0.02 , __a : Union[str, Any]=1E-5 , __a : List[str]=1 , __a : List[Any]=0 , __a : int=2 , __a : str=1024 , __a : str=128 , __a : List[Any]=128 , __a : Tuple=True , __a : Optional[int]=32 , __a : Any=128 , __a : List[Any]=64 , __a : Tuple=256 , __a : str=True , __a : int=True , __a : Optional[Any]=True , __a : Any=224 , __a : str=3 , __a : List[str]=16 , __a : Union[str, Any]=None , **__a : List[Any] , ) -> List[str]:
"""simple docstring"""
super().__init__(
vocab_size=__a , hidden_size=__a , num_hidden_layers=__a , num_attention_heads=__a , intermediate_size=__a , hidden_act=__a , hidden_dropout_prob=__a , attention_probs_dropout_prob=__a , max_position_embeddings=__a , type_vocab_size=__a , initializer_range=__a , layer_norm_eps=__a , pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a , )
__lowercase : int = max_ad_position_embeddings
__lowercase : Any = coordinate_size
__lowercase : Optional[Any] = shape_size
__lowercase : str = has_relative_attention_bias
__lowercase : int = rel_pos_bins
__lowercase : Union[str, Any] = max_rel_pos
__lowercase : str = has_spatial_attention_bias
__lowercase : str = rel_ad_pos_bins
__lowercase : List[Any] = max_rel_ad_pos
__lowercase : Tuple = text_embed
__lowercase : int = visual_embed
__lowercase : Tuple = input_size
__lowercase : Dict = num_channels
__lowercase : str = patch_size
__lowercase : Optional[int] = classifier_dropout
class lowerCAmelCase ( __a ):
'''simple docstring'''
_A : str = version.parse('''1.12''' )
@property
def lowerCAmelCase ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
else:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels"""}),
] )
@property
def lowerCAmelCase ( self : Union[str, Any] ) -> float:
"""simple docstring"""
return 1E-5
@property
def lowerCAmelCase ( self : str ) -> int:
"""simple docstring"""
return 12
def lowerCAmelCase ( self : List[Any] , __a : "ProcessorMixin" , __a : int = -1 , __a : int = -1 , __a : bool = False , __a : Optional["TensorType"] = None , __a : int = 3 , __a : int = 40 , __a : int = 40 , ) -> Mapping[str, Any]:
"""simple docstring"""
setattr(processor.image_processor , """apply_ocr""" , __a )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__lowercase : Tuple = compute_effective_axis_dimension(
__a , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__lowercase : Tuple = processor.tokenizer.num_special_tokens_to_add(__a )
__lowercase : Tuple = compute_effective_axis_dimension(
__a , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__a )
# Generate dummy inputs according to compute batch and sequence
__lowercase : Union[str, Any] = [[""" """.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
__lowercase : Tuple = [[[48, 84, 73, 128]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
__lowercase : Tuple = self._generate_dummy_images(__a , __a , __a , __a )
__lowercase : int = dict(
processor(
__a , text=__a , boxes=__a , return_tensors=__a , ) )
return inputs
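# Dummy-input sketch for the ONNX config above: with dynamic axes (-1) the
# batch is fixed at 2 samples and the sequence at 8 tokens (minus special
# tokens), each text is seq_length copies of the unk token, every sample
# reuses the bounding box [48, 84, 73, 128], and _generate_dummy_images
# supplies pixel_values -- enough to trace the graph without real documents.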
| 649
| 1
|
from __future__ import annotations
lowerCamelCase : Optional[int] = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def snake_case_ ( lowerCAmelCase_ : list[list[int]] , lowerCAmelCase_ : list[int] , lowerCAmelCase_ : list[int] , lowerCAmelCase_ : int , lowerCAmelCase_ : list[list[int]] , ):
__lowercase : str = [
[0 for col in range(len(grid[0] ) )] for row in range(len(lowerCAmelCase_ ) )
] # the reference grid
__lowercase : str = 1
__lowercase : Optional[Any] = [
[0 for col in range(len(grid[0] ) )] for row in range(len(lowerCAmelCase_ ) )
] # the action grid
__lowercase : Optional[Any] = init[0]
__lowercase : Dict = init[1]
__lowercase : str = 0
__lowercase : Union[str, Any] = g + heuristic[x][y] # cost from starting cell to destination cell
__lowercase : Any = [[f, g, x, y]]
__lowercase : Union[str, Any] = False # flag that is set when search is complete
__lowercase : Optional[int] = False # flag set if we can't find expand
while not found and not resign:
if len(lowerCAmelCase_ ) == 0:
raise ValueError("""Algorithm is unable to find solution""" )
        else:  # choose the least costly action so as to move closer to the goal
cell.sort()
cell.reverse()
__lowercase : Tuple = cell.pop()
__lowercase : Optional[int] = next_cell[2]
__lowercase : Tuple = next_cell[3]
__lowercase : List[Any] = next_cell[1]
if x == goal[0] and y == goal[1]:
__lowercase : Optional[Any] = True
else:
for i in range(len(lowerCAmelCase_ ) ): # to try out different valid actions
__lowercase : Dict = x + DIRECTIONS[i][0]
__lowercase : Any = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(lowerCAmelCase_ ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
__lowercase : str = g + cost
__lowercase : Tuple = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
__lowercase : List[Any] = 1
__lowercase : List[str] = i
__lowercase : int = []
__lowercase : str = goal[0]
__lowercase : Dict = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
__lowercase : List[str] = x - DIRECTIONS[action[x][y]][0]
__lowercase : int = y - DIRECTIONS[action[x][y]][1]
__lowercase : int = xa
__lowercase : int = ya
invpath.append([x, y] )
__lowercase : List[Any] = []
for i in range(len(lowerCAmelCase_ ) ):
path.append(invpath[len(lowerCAmelCase_ ) - 1 - i] )
return path, action
if __name__ == "__main__":
lowerCamelCase : Any = [
[0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0s are free cells whereas 1s are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
lowerCamelCase : Any = [0, 0]
# all coordinates are given in format [y,x]
lowerCamelCase : str = [len(grid) - 1, len(grid[0]) - 1]
lowerCamelCase : List[Any] = 1
# the cost map which pushes the path closer to the goal
lowerCamelCase : str = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
lowerCamelCase : Tuple = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
lowerCamelCase : str = 99
lowerCamelCase ,lowerCamelCase : Optional[Any] = search(grid, init, goal, cost, heuristic)
print('''ACTION MAP''')
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
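    # Worked heuristic values for the grid above (derived from the loops, not
    # hard-coded): the goal is [4, 5], so heuristic[0][0] = |0-4| + |0-5| = 9,
    # while obstacle cells are penalized to 99, steering expansion away from
    # walls.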
| 649
|
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
lowerCamelCase : List[Any] = logging.get_logger(__name__)
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : List[str] , __a : str = None , __a : uuid.UUID = None , __a : Any=None , __a : List[Any]=None ) -> List[Any]:
"""simple docstring"""
if not conversation_id:
__lowercase : Any = uuid.uuida()
if past_user_inputs is None:
__lowercase : Dict = []
if generated_responses is None:
__lowercase : Dict = []
__lowercase : uuid.UUID = conversation_id
__lowercase : List[str] = past_user_inputs
__lowercase : List[str] = generated_responses
__lowercase : Optional[str] = text
def __eq__( self : Dict , __a : Dict ) -> Any:
"""simple docstring"""
if not isinstance(__a , __a ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def lowerCAmelCase ( self : List[str] , __a : str , __a : bool = False ) -> Dict:
"""simple docstring"""
if self.new_user_input:
if overwrite:
                logger.warning(
                    F"User input added while unprocessed input still existed: \"{self.new_user_input}\" was overwritten "
                    F"with: \"{text}\"." )
__lowercase : Optional[int] = text
else:
                logger.warning(
                    F"User input added while unprocessed input still existed: \"{self.new_user_input}\"; new input "
                    F"ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input." )
else:
__lowercase : Dict = text
def lowerCAmelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
__lowercase : Dict = None
def lowerCAmelCase ( self : Optional[int] , __a : str ) -> List[Any]:
"""simple docstring"""
self.generated_responses.append(__a )
def lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self : int ) -> str:
"""simple docstring"""
__lowercase : Optional[int] = F"Conversation id: {self.uuid} \n"
for is_user, text in self.iter_texts():
__lowercase : Optional[Any] = """user""" if is_user else """bot"""
output += F"{name} >> {text} \n"
return output
@add_end_docstrings(
__a , r'''
min_length_for_response (`int`, *optional*, defaults to 32):
The minimum length (in number of tokens) for a response.
minimum_tokens (`int`, *optional*, defaults to 10):
The minimum length of tokens to leave for a response.
''' , )
class lowerCAmelCase ( __a ):
'''simple docstring'''
def __init__( self : Any , *__a : int , **__a : str ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(*__a , **__a )
if self.tokenizer.pad_token_id is None:
__lowercase : List[Any] = self.tokenizer.eos_token
def lowerCAmelCase ( self : Union[str, Any] , __a : int=None , __a : Tuple=None , __a : Any=None , **__a : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowercase : List[str] = {}
__lowercase : Tuple = {}
__lowercase : List[str] = {}
if min_length_for_response is not None:
__lowercase : Dict = min_length_for_response
if minimum_tokens is not None:
__lowercase : Union[str, Any] = minimum_tokens
if "max_length" in generate_kwargs:
__lowercase : Union[str, Any] = generate_kwargs["""max_length"""]
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
__lowercase : Union[str, Any] = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(__a )
return preprocess_params, forward_params, postprocess_params
def __call__( self : Optional[int] , __a : Union[Conversation, List[Conversation]] , __a : Dict=0 , **__a : List[Any] ) -> Tuple:
"""simple docstring"""
__lowercase : List[Any] = super().__call__(__a , num_workers=__a , **__a )
if isinstance(__a , __a ) and len(__a ) == 1:
return outputs[0]
return outputs
def lowerCAmelCase ( self : Union[str, Any] , __a : Conversation , __a : Tuple=32 ) -> Dict[str, Any]:
"""simple docstring"""
if not isinstance(__a , __a ):
            raise ValueError("""ConversationalPipeline expects a Conversation as input""" )
if conversation.new_user_input is None:
raise ValueError(
F"Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. "
"""Add user inputs with the conversation's `add_user_input` method""" )
if hasattr(self.tokenizer , """_build_conversation_input_ids""" ):
__lowercase : List[Any] = self.tokenizer._build_conversation_input_ids(__a )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
__lowercase : Tuple = self._legacy_parse_and_tokenize(__a )
if self.framework == "pt":
__lowercase : List[Any] = torch.LongTensor([input_ids] )
elif self.framework == "tf":
__lowercase : List[str] = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def lowerCAmelCase ( self : Any , __a : Dict , __a : Any=10 , **__a : Dict ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Optional[int] = generate_kwargs.get("""max_length""" , self.model.config.max_length )
__lowercase : List[Any] = model_inputs["""input_ids"""].shape[1]
if max_length - minimum_tokens < n:
logger.warning(F"Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})" )
__lowercase : Any = max_length - minimum_tokens
__lowercase : int = model_inputs["""input_ids"""][:, -trim:]
if "attention_mask" in model_inputs:
__lowercase : Dict = model_inputs["""attention_mask"""][:, -trim:]
__lowercase : Union[str, Any] = model_inputs.pop("""conversation""" )
__lowercase : Tuple = max_length
__lowercase : int = self.model.generate(**__a , **__a )
if self.model.config.is_encoder_decoder:
__lowercase : Optional[int] = 1
else:
__lowercase : str = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def lowerCAmelCase ( self : int , __a : Tuple , __a : List[Any]=True ) -> List[str]:
"""simple docstring"""
__lowercase : int = model_outputs["""output_ids"""]
__lowercase : Union[str, Any] = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=__a , clean_up_tokenization_spaces=__a , )
__lowercase : List[str] = model_outputs["""conversation"""]
conversation.mark_processed()
conversation.append_response(__a )
return conversation
def lowerCAmelCase ( self : int , __a : Conversation ) -> Dict:
"""simple docstring"""
__lowercase : Optional[int] = self.tokenizer.eos_token_id
__lowercase : Optional[Any] = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(__a , add_special_tokens=__a ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(__a , add_special_tokens=__a ) )
if len(__a ) > self.tokenizer.model_max_length:
__lowercase : List[Any] = input_ids[-self.tokenizer.model_max_length :]
return input_ids
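# A minimal usage sketch (model name illustrative; `Conversation` is the
# upstream name of the first class above, and `pipeline` is the standard
# transformers factory -- both assumptions about the surrounding package):
#
#   from transformers import Conversation, pipeline
#   chatbot = pipeline("conversational", model="microsoft/DialoGPT-medium")
#   conv = chatbot(Conversation("Hi, how are you?"))
#   print(conv.generated_responses[-1])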
| 649
| 1
|
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase ( self : Tuple , __a : int , __a : int ) -> int:
"""simple docstring"""
__lowercase : Optional[int] = jnp.ones((batch_size, length) ) / length
return scores
def lowerCAmelCase ( self : List[str] ) -> str:
"""simple docstring"""
__lowercase : str = None
__lowercase : List[str] = 20
__lowercase : List[Any] = self._get_uniform_logits(batch_size=2 , length=__a )
# tweak scores to not be uniform anymore
__lowercase : Dict = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
__lowercase : Dict = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
__lowercase : Union[str, Any] = jax.nn.softmax(__a , axis=-1 )
__lowercase : Union[str, Any] = FlaxTemperatureLogitsWarper(temperature=0.5 )
__lowercase : Union[str, Any] = FlaxTemperatureLogitsWarper(temperature=1.3 )
__lowercase : Optional[Any] = jax.nn.softmax(temp_dist_warper_sharper(__a , scores.copy() , cur_len=__a ) , axis=-1 )
__lowercase : int = jax.nn.softmax(temp_dist_warper_smoother(__a , scores.copy() , cur_len=__a ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1E-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1E-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def lowerCAmelCase ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase : Union[str, Any] = None
__lowercase : Optional[Any] = 10
__lowercase : Optional[Any] = 2
# create ramp distribution
__lowercase : Optional[Any] = np.broadcast_to(np.arange(__a )[None, :] , (batch_size, vocab_size) ).copy()
__lowercase : Union[str, Any] = ramp_logits[1:, : vocab_size // 2] + vocab_size
__lowercase : List[Any] = FlaxTopKLogitsWarper(3 )
__lowercase : Union[str, Any] = top_k_warp(__a , __a , cur_len=__a )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
__lowercase : List[Any] = 5
__lowercase : Dict = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
__lowercase : Optional[Any] = np.broadcast_to(np.arange(__a )[None, :] , (batch_size, length) ).copy()
__lowercase : str = top_k_warp_safety_check(__a , __a , cur_len=__a )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def lowerCAmelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowercase : Union[str, Any] = None
__lowercase : int = 10
__lowercase : Tuple = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
__lowercase : Union[str, Any] = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
__lowercase : Optional[Any] = FlaxTopPLogitsWarper(0.8 )
__lowercase : List[Any] = np.exp(top_p_warp(__a , __a , cur_len=__a ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
__lowercase : List[Any] = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(__a , __a , atol=1E-3 ) )
# check edge cases with negative and extreme logits
__lowercase : List[str] = np.broadcast_to(np.arange(__a )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
__lowercase : Tuple = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
__lowercase : List[str] = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
__lowercase : Dict = top_p_warp(__a , __a , cur_len=__a )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def lowerCAmelCase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Optional[int] = 20
__lowercase : Optional[int] = 4
__lowercase : Union[str, Any] = 0
__lowercase : Dict = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=__a )
# check that min length is applied at length 5
__lowercase : Optional[int] = ids_tensor((batch_size, 20) , vocab_size=20 )
__lowercase : List[Any] = 5
__lowercase : List[Any] = self._get_uniform_logits(__a , __a )
__lowercase : Optional[int] = min_dist_processor(__a , __a , cur_len=__a )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float("""inf""" )] )
# check that min length is not applied anymore at length 15
__lowercase : List[Any] = self._get_uniform_logits(__a , __a )
__lowercase : Any = 15
__lowercase : int = min_dist_processor(__a , __a , cur_len=__a )
self.assertFalse(jnp.isinf(__a ).any() )
def lowerCAmelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
__lowercase : str = 20
__lowercase : Optional[Any] = 4
__lowercase : Optional[Any] = 0
__lowercase : int = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=__a )
# check that all scores are -inf except the bos_token_id score
__lowercase : Dict = ids_tensor((batch_size, 1) , vocab_size=20 )
__lowercase : int = 1
__lowercase : Any = self._get_uniform_logits(__a , __a )
__lowercase : List[str] = logits_processor(__a , __a , cur_len=__a )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
        self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] )  # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
__lowercase : Any = 3
__lowercase : List[Any] = self._get_uniform_logits(__a , __a )
__lowercase : Any = logits_processor(__a , __a , cur_len=__a )
self.assertFalse(jnp.isinf(__a ).any() )
def lowerCAmelCase ( self : str ) -> Tuple:
"""simple docstring"""
__lowercase : Dict = 20
__lowercase : Tuple = 4
__lowercase : str = 0
__lowercase : Optional[Any] = 5
__lowercase : Tuple = FlaxForcedEOSTokenLogitsProcessor(max_length=__a , eos_token_id=__a )
# check that all scores are -inf except the eos_token_id when max_length is reached
__lowercase : List[Any] = ids_tensor((batch_size, 4) , vocab_size=20 )
__lowercase : Union[str, Any] = 4
__lowercase : int = self._get_uniform_logits(__a , __a )
__lowercase : int = logits_processor(__a , __a , cur_len=__a )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
__lowercase : List[str] = 3
__lowercase : List[Any] = self._get_uniform_logits(__a , __a )
__lowercase : List[str] = logits_processor(__a , __a , cur_len=__a )
self.assertFalse(jnp.isinf(__a ).any() )
def lowerCAmelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
__lowercase : List[Any] = 4
__lowercase : Any = 10
__lowercase : Optional[int] = 15
__lowercase : int = 2
__lowercase : Optional[int] = 1
__lowercase : Optional[Any] = 15
# dummy input_ids and scores
__lowercase : Dict = ids_tensor((batch_size, sequence_length) , __a )
__lowercase : int = input_ids.copy()
__lowercase : int = self._get_uniform_logits(__a , __a )
__lowercase : Tuple = scores.copy()
# instantiate all dist processors
__lowercase : int = FlaxTemperatureLogitsWarper(temperature=0.5 )
__lowercase : List[Any] = FlaxTopKLogitsWarper(3 )
__lowercase : List[str] = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
__lowercase : Optional[Any] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=__a )
__lowercase : Tuple = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=__a )
__lowercase : Dict = FlaxForcedEOSTokenLogitsProcessor(max_length=__a , eos_token_id=__a )
__lowercase : List[Any] = 10
# no processor list
__lowercase : Any = temp_dist_warp(__a , __a , cur_len=__a )
__lowercase : Dict = top_k_warp(__a , __a , cur_len=__a )
__lowercase : List[str] = top_p_warp(__a , __a , cur_len=__a )
__lowercase : int = min_dist_proc(__a , __a , cur_len=__a )
__lowercase : Any = bos_dist_proc(__a , __a , cur_len=__a )
__lowercase : Any = eos_dist_proc(__a , __a , cur_len=__a )
# with processor list
__lowercase : Dict = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
__lowercase : str = processor(__a , __a , cur_len=__a )
# scores should be equal
self.assertTrue(jnp.allclose(__a , __a , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def lowerCAmelCase ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
__lowercase : List[Any] = 4
__lowercase : str = 10
__lowercase : Tuple = 15
__lowercase : str = 2
__lowercase : List[str] = 1
__lowercase : List[str] = 15
# dummy input_ids and scores
__lowercase : str = ids_tensor((batch_size, sequence_length) , __a )
__lowercase : str = input_ids.copy()
__lowercase : int = self._get_uniform_logits(__a , __a )
__lowercase : List[Any] = scores.copy()
# instantiate all dist processors
__lowercase : int = FlaxTemperatureLogitsWarper(temperature=0.5 )
__lowercase : str = FlaxTopKLogitsWarper(3 )
__lowercase : List[str] = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
__lowercase : Any = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=__a )
__lowercase : Tuple = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=__a )
__lowercase : Dict = FlaxForcedEOSTokenLogitsProcessor(max_length=__a , eos_token_id=__a )
__lowercase : int = 10
# no processor list
def run_no_processor_list(__a : Optional[Any] , __a : List[str] , __a : Union[str, Any] ):
__lowercase : str = temp_dist_warp(__a , __a , cur_len=__a )
__lowercase : List[Any] = top_k_warp(__a , __a , cur_len=__a )
__lowercase : Dict = top_p_warp(__a , __a , cur_len=__a )
__lowercase : int = min_dist_proc(__a , __a , cur_len=__a )
__lowercase : str = bos_dist_proc(__a , __a , cur_len=__a )
__lowercase : Union[str, Any] = eos_dist_proc(__a , __a , cur_len=__a )
return scores
# with processor list
def run_processor_list(__a : Optional[Any] , __a : int , __a : Dict ):
__lowercase : Optional[Any] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
__lowercase : Optional[Any] = processor(__a , __a , cur_len=__a )
return scores
__lowercase : int = jax.jit(__a )
__lowercase : Optional[int] = jax.jit(__a )
__lowercase : str = jitted_run_no_processor_list(__a , __a , __a )
__lowercase : List[str] = jitted_run_processor_list(__a , __a , __a )
# scores should be equal
self.assertTrue(jnp.allclose(__a , __a , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
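# Temperature math behind the first test above: warping divides logits by T,
# so softmax(l / 0.5) sharpens the peaked row (its max grows, its min shrinks)
# while softmax(l / 1.3) flattens it, and a uniform row is unchanged because
# every entry is scaled identically.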
| 649
|
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class lowerCAmelCase ( __a ):
'''simple docstring'''
def lowerCAmelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowercase : str = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__a , """tf_padding""" ) )
self.parent.assertTrue(hasattr(__a , """depth_multiplier""" ) )
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : Union[str, Any] , __a : Tuple , __a : str=13 , __a : Dict=3 , __a : List[Any]=32 , __a : Any=0.25 , __a : Any=8 , __a : Optional[int]=8 , __a : Optional[int]=6 , __a : Dict=32 , __a : Tuple=True , __a : List[Any]=True , __a : Optional[int]=True , __a : Tuple="relu6" , __a : Optional[Any]=1280 , __a : str=0.1 , __a : str=0.02 , __a : Optional[Any]=True , __a : Tuple=True , __a : Dict=10 , __a : Optional[Any]=None , ) -> Any:
"""simple docstring"""
__lowercase : List[str] = parent
__lowercase : Tuple = batch_size
__lowercase : Dict = num_channels
__lowercase : Optional[int] = image_size
__lowercase : int = depth_multiplier
__lowercase : str = depth_divisible_by
__lowercase : int = min_depth
__lowercase : Tuple = expand_ratio
__lowercase : Optional[int] = tf_padding
__lowercase : Dict = output_stride
__lowercase : Dict = first_layer_is_expansion
__lowercase : Optional[Any] = finegrained_output
__lowercase : str = hidden_act
__lowercase : Union[str, Any] = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier )
__lowercase : Optional[int] = classifier_dropout_prob
__lowercase : int = use_labels
__lowercase : Optional[int] = is_training
__lowercase : Dict = num_labels
__lowercase : Tuple = initializer_range
__lowercase : Optional[Any] = scope
def lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase : List[Any] = None
__lowercase : Optional[Any] = None
if self.use_labels:
__lowercase : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
__lowercase : Optional[int] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__lowercase : List[Any] = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowerCAmelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def lowerCAmelCase ( self : Tuple , __a : Dict , __a : Tuple , __a : Optional[int] , __a : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : Optional[int] = MobileNetVaModel(config=__a )
model.to(__a )
model.eval()
__lowercase : Tuple = model(__a )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
self.parent.assertEqual(
result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , )
def lowerCAmelCase ( self : List[str] , __a : Optional[int] , __a : List[str] , __a : str , __a : Optional[int] ) -> Tuple:
"""simple docstring"""
__lowercase : List[Any] = self.num_labels
__lowercase : Dict = MobileNetVaForImageClassification(__a )
model.to(__a )
model.eval()
__lowercase : Dict = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase ( self : int , __a : List[str] , __a : Tuple , __a : Any , __a : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowercase : int = self.num_labels
__lowercase : List[Any] = MobileNetVaForSemanticSegmentation(__a )
model.to(__a )
model.eval()
__lowercase : Dict = model(__a )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
__lowercase : str = model(__a , labels=__a )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowerCAmelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
__lowercase : List[str] = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase , __lowercase : List[str] = config_and_inputs
__lowercase : List[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase ( __a , __a , unittest.TestCase ):
'''simple docstring'''
_A : Tuple = (
(MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
if is_torch_available()
else ()
)
_A : Optional[Any] = (
{
'''feature-extraction''': MobileNetVaModel,
'''image-classification''': MobileNetVaForImageClassification,
'''image-segmentation''': MobileNetVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
_A : Tuple = False
_A : List[str] = False
_A : List[str] = False
_A : Optional[int] = False
def lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = MobileNetVaModelTester(self )
__lowercase : int = MobileNetVaConfigTester(self , config_class=__a , has_text_modality=__a )
def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileNetV2 does not use inputs_embeds""" )
def lowerCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip(reason="""MobileNetV2 does not support input and output embeddings""" )
def lowerCAmelCase ( self : Any ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip(reason="""MobileNetV2 does not output attentions""" )
def lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
pass
def lowerCAmelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
__lowercase , __lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase : List[Any] = model_class(__a )
__lowercase : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase : int = [*signature.parameters.keys()]
__lowercase : Any = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __a )
def lowerCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
__lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def lowerCAmelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
def check_hidden_states_output(__a : List[Any] , __a : Tuple , __a : List[str] ):
__lowercase : Optional[Any] = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
__lowercase : List[Any] = model(**self._prepare_for_class(__a , __a ) )
__lowercase : Tuple = outputs.hidden_states
__lowercase : str = 16
self.assertEqual(len(__a ) , __a )
__lowercase , __lowercase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase : Any = True
check_hidden_states_output(__a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase : Union[str, Any] = True
check_hidden_states_output(__a , __a , __a )
def lowerCAmelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__lowercase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
def lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
__lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__a )
@slow
def lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase : Optional[int] = MobileNetVaModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def snake_case_ ( ):
__lowercase : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
return (
MobileNetVaImageProcessor.from_pretrained("""google/mobilenet_v2_1.0_224""" ) if is_vision_available() else None
)
@slow
def lowerCAmelCase ( self : str ) -> int:
"""simple docstring"""
__lowercase : Tuple = MobileNetVaForImageClassification.from_pretrained("""google/mobilenet_v2_1.0_224""" ).to(__a )
__lowercase : str = self.default_image_processor
__lowercase : Tuple = prepare_img()
__lowercase : Tuple = image_processor(images=__a , return_tensors="""pt""" ).to(__a )
# forward pass
with torch.no_grad():
__lowercase : str = model(**__a )
# verify the logits
__lowercase : Union[str, Any] = torch.Size((1, 1001) )
self.assertEqual(outputs.logits.shape , __a )
__lowercase : str = torch.tensor([0.2445, -1.1993, 0.1905] ).to(__a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1E-4 ) )
@slow
def lowerCAmelCase ( self : Tuple ) -> Any:
"""simple docstring"""
__lowercase : int = MobileNetVaForSemanticSegmentation.from_pretrained("""google/deeplabv3_mobilenet_v2_1.0_513""" )
__lowercase : Dict = model.to(__a )
__lowercase : Tuple = MobileNetVaImageProcessor.from_pretrained("""google/deeplabv3_mobilenet_v2_1.0_513""" )
__lowercase : List[str] = prepare_img()
__lowercase : Optional[int] = image_processor(images=__a , return_tensors="""pt""" ).to(__a )
# forward pass
with torch.no_grad():
__lowercase : Union[str, Any] = model(**__a )
__lowercase : Any = outputs.logits
# verify the logits
__lowercase : Dict = torch.Size((1, 21, 65, 65) )
self.assertEqual(logits.shape , __a )
__lowercase : str = torch.tensor(
[
[[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
[[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
[[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
] , device=__a , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __a , atol=1E-4 ) )
def snake_case_ ( lowerCAmelCase_ : list , lowerCAmelCase_ : list , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : int ):
if index == number_of_items:
return 0
__lowercase : Optional[Any] = 0
__lowercase : int = 0
__lowercase : Any = knapsack(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , index + 1 )
if weights[index] <= max_weight:
__lowercase : str = values[index] + knapsack(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , max_weight - weights[index] , index + 1 )
return max(lowerCAmelCase_ , lowerCAmelCase_ )
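# A minimal usage sketch (the recursive calls suggest the function's real name
# is `knapsack`): with weights [1, 3, 4], values [6, 10, 12], 3 items and a
# capacity of 5, the best pick is items 0 and 2 (weight 1 + 4 <= 5):
#
#     knapsack([1, 3, 4], [6, 10, 12], 3, 5, 0)  # -> 18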
if __name__ == "__main__":
import doctest
doctest.testmod()
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def snake_case_ ( lowerCAmelCase_ : bool = True , *lowerCAmelCase_ : int , **lowerCAmelCase_ : List[str] ):
if not is_tqdm_available():
raise ImportError("""Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.""" )
__lowercase : List[str] = False
if main_process_only:
        __lowercase : Optional[int] = PartialState().local_process_index != 0
return _tqdm(*lowerCAmelCase_ , **lowerCAmelCase_ , disable=lowerCAmelCase_ )
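# A minimal usage sketch (`dataloader` is hypothetical; note that
# main_process_only is the first positional argument of the wrapper):
#
#     for batch in tqdm(True, dataloader, desc="train"):
#         ...
#
# With main_process_only=True the bar is shown only on local process 0 and
# disabled on every other rank.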
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
class lowerCAmelCase ( __a ):
'''simple docstring'''
def __init__( self : int , *__a : Dict , **__a : Optional[Any] ) -> None:
"""simple docstring"""
warnings.warn(
"""The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use BeitImageProcessor instead.""" , __a , )
super().__init__(*__a , **__a )
from __future__ import annotations
def snake_case_ ( lowerCAmelCase_ : list[int] ):
if not nums:
return 0
__lowercase : Tuple = nums[0]
__lowercase : Tuple = 0
for num in nums[1:]:
__lowercase , __lowercase : List[str] = (
max_excluding + num,
max(lowerCAmelCase_ , lowerCAmelCase_ ),
)
return max(lowerCAmelCase_ , lowerCAmelCase_ )
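# Worked example: for [2, 7, 9, 3, 1] the maximum sum of non-adjacent elements
# is 12 (2 + 9 + 1), which is what the running include/exclude pair above computes.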
if __name__ == "__main__":
import doctest
doctest.testmod()
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCamelCase : Any = {
'''configuration_efficientnet''': [
'''EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EfficientNetConfig''',
'''EfficientNetOnnxConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : str = ['''EfficientNetImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Tuple = [
'''EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EfficientNetForImageClassification''',
'''EfficientNetModel''',
'''EfficientNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
lowerCamelCase : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
lowerCamelCase : List[str] = '''0.18.2'''
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
    if not is_note_seq_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
lowerCamelCase : Tuple = [
# (stable-diffusion, HF Diffusers)
('''time_embed.0.weight''', '''time_embedding.linear_1.weight'''),
('''time_embed.0.bias''', '''time_embedding.linear_1.bias'''),
('''time_embed.2.weight''', '''time_embedding.linear_2.weight'''),
('''time_embed.2.bias''', '''time_embedding.linear_2.bias'''),
('''input_blocks.0.0.weight''', '''conv_in.weight'''),
('''input_blocks.0.0.bias''', '''conv_in.bias'''),
('''out.0.weight''', '''conv_norm_out.weight'''),
('''out.0.bias''', '''conv_norm_out.bias'''),
('''out.2.weight''', '''conv_out.weight'''),
('''out.2.bias''', '''conv_out.bias'''),
]
lowerCamelCase : Optional[Any] = [
# (stable-diffusion, HF Diffusers)
('''in_layers.0''', '''norm1'''),
('''in_layers.2''', '''conv1'''),
('''out_layers.0''', '''norm2'''),
('''out_layers.3''', '''conv2'''),
('''emb_layers.1''', '''time_emb_proj'''),
('''skip_connection''', '''conv_shortcut'''),
]
lowerCamelCase : Tuple = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
# loop over downblocks/upblocks
for j in range(2):
# loop over resnets/attentions for downblocks
lowerCamelCase : str = f'''down_blocks.{i}.resnets.{j}.'''
lowerCamelCase : Optional[Any] = f'''input_blocks.{3*i + j + 1}.0.'''
unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
if i < 3:
# no attention layers in down_blocks.3
lowerCamelCase : Optional[Any] = f'''down_blocks.{i}.attentions.{j}.'''
lowerCamelCase : str = f'''input_blocks.{3*i + j + 1}.1.'''
unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
for j in range(3):
# loop over resnets/attentions for upblocks
lowerCamelCase : Union[str, Any] = f'''up_blocks.{i}.resnets.{j}.'''
lowerCamelCase : Optional[Any] = f'''output_blocks.{3*i + j}.0.'''
unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
if i > 0:
# no attention layers in up_blocks.0
lowerCamelCase : List[Any] = f'''up_blocks.{i}.attentions.{j}.'''
lowerCamelCase : Optional[int] = f'''output_blocks.{3*i + j}.1.'''
unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
if i < 3:
# no downsample in down_blocks.3
lowerCamelCase : List[str] = f'''down_blocks.{i}.downsamplers.0.conv.'''
lowerCamelCase : List[str] = f'''input_blocks.{3*(i+1)}.0.op.'''
unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
# no upsample in up_blocks.3
lowerCamelCase : Optional[int] = f'''up_blocks.{i}.upsamplers.0.'''
lowerCamelCase : Optional[int] = f'''output_blocks.{3*i + 2}.{1 if i == 0 else 2}.'''
unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))
lowerCamelCase : Optional[Any] = '''mid_block.attentions.0.'''
lowerCamelCase : Tuple = '''middle_block.1.'''
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
for j in range(2):
lowerCamelCase : Optional[Any] = f'''mid_block.resnets.{j}.'''
lowerCamelCase : int = f'''middle_block.{2*j}.'''
unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
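# For example, the i = 0, j = 0 iteration above pairs the SD prefix
# "input_blocks.1.0." with the HF prefix "down_blocks.0.resnets.0.", and
# "input_blocks.1.1." with "down_blocks.0.attentions.0.".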
def snake_case_ ( lowerCAmelCase_ : Optional[int] ):
# buyer beware: this is a *brittle* function,
# and correct output requires that all of these pieces interact in
# the exact order in which I have arranged them.
__lowercase : List[str] = {k: k for k in unet_state_dict.keys()}
for sd_name, hf_name in unet_conversion_map:
__lowercase : Union[str, Any] = sd_name
for k, v in mapping.items():
if "resnets" in k:
for sd_part, hf_part in unet_conversion_map_resnet:
__lowercase : List[str] = v.replace(lowerCAmelCase_ , lowerCAmelCase_ )
__lowercase : str = v
for k, v in mapping.items():
for sd_part, hf_part in unet_conversion_map_layer:
__lowercase : int = v.replace(lowerCAmelCase_ , lowerCAmelCase_ )
__lowercase : List[Any] = v
__lowercase : List[str] = {v: unet_state_dict[k] for k, v in mapping.items()}
return new_state_dict
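# End-to-end example of the passes above: the HF key
# "down_blocks.0.resnets.0.norm1.weight" is renamed to the SD key
# "input_blocks.1.0.in_layers.0.weight" (resnet sub-part, then layer prefix).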
# ================#
# VAE Conversion #
# ================#
lowerCamelCase : List[str] = [
# (stable-diffusion, HF Diffusers)
('''nin_shortcut''', '''conv_shortcut'''),
('''norm_out''', '''conv_norm_out'''),
('''mid.attn_1.''', '''mid_block.attentions.0.'''),
]
for i in range(4):
# down_blocks have two resnets
for j in range(2):
lowerCamelCase : str = f'''encoder.down_blocks.{i}.resnets.{j}.'''
lowerCamelCase : Dict = f'''encoder.down.{i}.block.{j}.'''
vae_conversion_map.append((sd_down_prefix, hf_down_prefix))
if i < 3:
lowerCamelCase : List[str] = f'''down_blocks.{i}.downsamplers.0.'''
lowerCamelCase : Tuple = f'''down.{i}.downsample.'''
vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))
lowerCamelCase : Union[str, Any] = f'''up_blocks.{i}.upsamplers.0.'''
lowerCamelCase : List[str] = f'''up.{3-i}.upsample.'''
vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))
# up_blocks have three resnets
# also, up blocks in hf are numbered in reverse from sd
for j in range(3):
lowerCamelCase : Dict = f'''decoder.up_blocks.{i}.resnets.{j}.'''
lowerCamelCase : Any = f'''decoder.up.{3-i}.block.{j}.'''
vae_conversion_map.append((sd_up_prefix, hf_up_prefix))
# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
lowerCamelCase : Optional[Any] = f'''mid_block.resnets.{i}.'''
lowerCamelCase : Dict = f'''mid.block_{i+1}.'''
vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
lowerCamelCase : str = [
# (stable-diffusion, HF Diffusers)
('''norm.''', '''group_norm.'''),
('''q.''', '''query.'''),
('''k.''', '''key.'''),
('''v.''', '''value.'''),
('''proj_out.''', '''proj_attn.'''),
]
def snake_case_ ( lowerCAmelCase_ : Tuple ):
# convert HF linear weights to SD conv2d weights
return w.reshape(*w.shape , 1 , 1 )
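# Worked example: a Linear weight of shape [512, 512] becomes [512, 512, 1, 1],
# so it can be loaded into the 1x1 Conv2d attention projections SD checkpoints use.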
def snake_case_ ( lowerCAmelCase_ : Union[str, Any] ):
__lowercase : str = {k: k for k in vae_state_dict.keys()}
for k, v in mapping.items():
for sd_part, hf_part in vae_conversion_map:
__lowercase : Optional[int] = v.replace(lowerCAmelCase_ , lowerCAmelCase_ )
__lowercase : Optional[int] = v
for k, v in mapping.items():
if "attentions" in k:
for sd_part, hf_part in vae_conversion_map_attn:
__lowercase : Any = v.replace(lowerCAmelCase_ , lowerCAmelCase_ )
__lowercase : Optional[Any] = v
__lowercase : Any = {v: vae_state_dict[k] for k, v in mapping.items()}
__lowercase : str = ["""q""", """k""", """v""", """proj_out"""]
for k, v in new_state_dict.items():
for weight_name in weights_to_convert:
if F"mid.attn_1.{weight_name}.weight" in k:
print(F"Reshaping {k} for SD format" )
__lowercase : Dict = reshape_weight_for_sd(lowerCAmelCase_ )
return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
lowerCamelCase : Tuple = [
# (stable-diffusion, HF Diffusers)
('''resblocks.''', '''text_model.encoder.layers.'''),
('''ln_1''', '''layer_norm1'''),
('''ln_2''', '''layer_norm2'''),
('''.c_fc.''', '''.fc1.'''),
('''.c_proj.''', '''.fc2.'''),
('''.attn''', '''.self_attn'''),
('''ln_final.''', '''transformer.text_model.final_layer_norm.'''),
('''token_embedding.weight''', '''transformer.text_model.embeddings.token_embedding.weight'''),
('''positional_embedding''', '''transformer.text_model.embeddings.position_embedding.weight'''),
]
lowerCamelCase : Union[str, Any] = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
lowerCamelCase : Any = re.compile('''|'''.join(protected.keys()))
# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
lowerCamelCase : Dict = {'''q''': 0, '''k''': 1, '''v''': 2}
def snake_case_ ( lowerCAmelCase_ : str ):
__lowercase : List[Any] = {}
__lowercase : Tuple = {}
__lowercase : Dict = {}
for k, v in text_enc_dict.items():
if (
k.endswith(""".self_attn.q_proj.weight""" )
or k.endswith(""".self_attn.k_proj.weight""" )
or k.endswith(""".self_attn.v_proj.weight""" )
):
__lowercase : List[str] = k[: -len(""".q_proj.weight""" )]
__lowercase : Any = k[-len("""q_proj.weight""" )]
if k_pre not in capture_qkv_weight:
__lowercase : Any = [None, None, None]
__lowercase : List[Any] = v
continue
if (
k.endswith(""".self_attn.q_proj.bias""" )
or k.endswith(""".self_attn.k_proj.bias""" )
or k.endswith(""".self_attn.v_proj.bias""" )
):
__lowercase : Optional[Any] = k[: -len(""".q_proj.bias""" )]
__lowercase : Optional[Any] = k[-len("""q_proj.bias""" )]
if k_pre not in capture_qkv_bias:
__lowercase : Any = [None, None, None]
__lowercase : List[str] = v
continue
        __lowercase : List[Any] = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , lowerCAmelCase_ )
__lowercase : Union[str, Any] = v
for k_pre, tensors in capture_qkv_weight.items():
if None in tensors:
raise Exception("""CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing""" )
        __lowercase : Any = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , lowerCAmelCase_ )
__lowercase : List[Any] = torch.cat(lowerCAmelCase_ )
for k_pre, tensors in capture_qkv_bias.items():
if None in tensors:
raise Exception("""CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing""" )
        __lowercase : Union[str, Any] = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , lowerCAmelCase_ )
__lowercase : str = torch.cat(lowerCAmelCase_ )
return new_state_dict
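# Note on the fused projection built above (shapes assume the SD 2.x OpenCLIP
# text encoder with hidden size 1024): the three per-layer q/k/v weights of
# shape [1024, 1024] are stacked by torch.cat into a single in_proj_weight of
# shape [3072, 1024], in the q, k, v order fixed by the code-to-index map.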
def snake_case_ ( lowerCAmelCase_ : Union[str, Any] ):
return text_enc_dict
if __name__ == "__main__":
lowerCamelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--half''', action='''store_true''', help='''Save weights in half precision.''')
parser.add_argument(
    '''--use_safetensors''', action='''store_true''', help='''Save weights using safetensors; default is ckpt.'''
)
lowerCamelCase : List[Any] = parser.parse_args()
assert args.model_path is not None, "Must provide a model path!"
assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
# Path for safetensors
lowerCamelCase : Dict = osp.join(args.model_path, '''unet''', '''diffusion_pytorch_model.safetensors''')
lowerCamelCase : Tuple = osp.join(args.model_path, '''vae''', '''diffusion_pytorch_model.safetensors''')
lowerCamelCase : List[str] = osp.join(args.model_path, '''text_encoder''', '''model.safetensors''')
# Load models from safetensors if they exist; otherwise fall back to the PyTorch .bin files
if osp.exists(unet_path):
lowerCamelCase : Any = load_file(unet_path, device='''cpu''')
else:
lowerCamelCase : Optional[Any] = osp.join(args.model_path, '''unet''', '''diffusion_pytorch_model.bin''')
lowerCamelCase : List[Any] = torch.load(unet_path, map_location='''cpu''')
if osp.exists(vae_path):
lowerCamelCase : List[str] = load_file(vae_path, device='''cpu''')
else:
lowerCamelCase : Tuple = osp.join(args.model_path, '''vae''', '''diffusion_pytorch_model.bin''')
lowerCamelCase : str = torch.load(vae_path, map_location='''cpu''')
if osp.exists(text_enc_path):
lowerCamelCase : Optional[Any] = load_file(text_enc_path, device='''cpu''')
else:
lowerCamelCase : Any = osp.join(args.model_path, '''text_encoder''', '''pytorch_model.bin''')
lowerCamelCase : Dict = torch.load(text_enc_path, map_location='''cpu''')
# Convert the UNet model
lowerCamelCase : Dict = convert_unet_state_dict(unet_state_dict)
lowerCamelCase : Optional[int] = {'''model.diffusion_model.''' + k: v for k, v in unet_state_dict.items()}
# Convert the VAE model
lowerCamelCase : Optional[int] = convert_vae_state_dict(vae_state_dict)
lowerCamelCase : Optional[Any] = {'''first_stage_model.''' + k: v for k, v in vae_state_dict.items()}
# Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
lowerCamelCase : Union[str, Any] = '''text_model.encoder.layers.22.layer_norm2.bias''' in text_enc_dict
if is_vaa_model:
# Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
lowerCamelCase : int = {'''transformer.''' + k: v for k, v in text_enc_dict.items()}
lowerCamelCase : Tuple = convert_text_enc_state_dict_vaa(text_enc_dict)
lowerCamelCase : str = {'''cond_stage_model.model.''' + k: v for k, v in text_enc_dict.items()}
else:
lowerCamelCase : Union[str, Any] = convert_text_enc_state_dict(text_enc_dict)
lowerCamelCase : Optional[int] = {'''cond_stage_model.transformer.''' + k: v for k, v in text_enc_dict.items()}
# Put together new checkpoint
lowerCamelCase : List[Any] = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
if args.half:
lowerCamelCase : Union[str, Any] = {k: v.half() for k, v in state_dict.items()}
if args.use_safetensors:
save_file(state_dict, args.checkpoint_path)
else:
lowerCamelCase : List[str] = {'''state_dict''': state_dict}
torch.save(state_dict, args.checkpoint_path)
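# Example invocation (paths are hypothetical; the script name is assumed):
#   python convert_diffusers_to_original_stable_diffusion.py \
#       --model_path ./stable-diffusion-v1-5 --checkpoint_path ./sd15.ckpt --half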
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase : List[Any] = logging.get_logger(__name__)
def snake_case_ ( lowerCAmelCase_ : int , lowerCAmelCase_ : str=False , lowerCAmelCase_ : Any=False ):
__lowercase : Any = """backbone.""" if is_semantic else """"""
__lowercase : Optional[Any] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"{prefix}blocks.{i}.norm1.weight", F"beit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"{prefix}blocks.{i}.norm1.bias", F"beit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"{prefix}blocks.{i}.attn.proj.weight", F"beit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(F"{prefix}blocks.{i}.attn.proj.bias", F"beit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"{prefix}blocks.{i}.norm2.weight", F"beit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"{prefix}blocks.{i}.norm2.bias", F"beit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc1.weight", F"beit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc1.bias", F"beit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc2.weight", F"beit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc2.bias", F"beit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
(F"{prefix}cls_token", """beit.embeddings.cls_token"""),
(F"{prefix}patch_embed.proj.weight", """beit.embeddings.patch_embeddings.projection.weight"""),
(F"{prefix}patch_embed.proj.bias", """beit.embeddings.patch_embeddings.projection.bias"""),
(F"{prefix}pos_embed", """beit.embeddings.position_embeddings"""),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("""mask_token""", """beit.embeddings.mask_token"""),
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("""fc_norm.weight""", """beit.pooler.layernorm.weight"""),
("""fc_norm.bias""", """beit.pooler.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def snake_case_ ( lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Any=False , lowerCAmelCase_ : List[Any]=False ):
for i in range(config.num_hidden_layers ):
__lowercase : Tuple = """backbone.""" if is_semantic else """"""
# queries, keys and values
__lowercase : int = state_dict.pop(F"{prefix}blocks.{i}.attn.qkv.weight" )
__lowercase : Dict = state_dict.pop(F"{prefix}blocks.{i}.attn.q_bias" )
__lowercase : int = state_dict.pop(F"{prefix}blocks.{i}.attn.v_bias" )
__lowercase : List[str] = in_proj_weight[
: config.hidden_size, :
]
__lowercase : Union[str, Any] = q_bias
__lowercase : Any = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__lowercase : Union[str, Any] = in_proj_weight[
-config.hidden_size :, :
]
__lowercase : str = v_bias
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
__lowercase : int = state_dict.pop(F"{prefix}blocks.{i}.gamma_1" )
__lowercase : str = state_dict.pop(F"{prefix}blocks.{i}.gamma_2" )
__lowercase : List[str] = gamma_a
__lowercase : Optional[int] = gamma_a
def snake_case_ ( lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : int ):
__lowercase : Tuple = dct.pop(lowerCAmelCase_ )
__lowercase : Tuple = val
def snake_case_ ( ):
__lowercase : Optional[int] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__lowercase : Any = Image.open(requests.get(lowerCAmelCase_ , stream=lowerCAmelCase_ ).raw )
return im
@torch.no_grad()
def snake_case_ ( lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[int]=False ):
__lowercase : Dict = False if """rvlcdip""" in checkpoint_url else True
__lowercase : Tuple = BeitConfig(use_absolute_position_embeddings=lowerCAmelCase_ , use_mask_token=lowerCAmelCase_ )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
__lowercase : Union[str, Any] = 1024
__lowercase : Optional[int] = 4096
__lowercase : List[Any] = 24
__lowercase : Dict = 16
# labels
if "rvlcdip" in checkpoint_url:
__lowercase : Optional[int] = 16
__lowercase : Any = """huggingface/label-files"""
__lowercase : Union[str, Any] = """rvlcdip-id2label.json"""
__lowercase : List[str] = json.load(open(hf_hub_download(lowerCAmelCase_ , lowerCAmelCase_ , repo_type="""dataset""" ) , """r""" ) )
__lowercase : Optional[int] = {int(lowerCAmelCase_ ): v for k, v in idalabel.items()}
__lowercase : Union[str, Any] = idalabel
__lowercase : Optional[Any] = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
__lowercase : Optional[int] = torch.hub.load_state_dict_from_url(lowerCAmelCase_ , map_location="""cpu""" )["""model"""]
__lowercase : Union[str, Any] = create_rename_keys(lowerCAmelCase_ , has_lm_head=lowerCAmelCase_ )
for src, dest in rename_keys:
rename_key(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
read_in_q_k_v(lowerCAmelCase_ , lowerCAmelCase_ , has_lm_head=lowerCAmelCase_ )
# load HuggingFace model
__lowercase : Dict = BeitForMaskedImageModeling(lowerCAmelCase_ ) if has_lm_head else BeitForImageClassification(lowerCAmelCase_ )
model.eval()
model.load_state_dict(lowerCAmelCase_ )
# Check outputs on an image
__lowercase : List[str] = BeitImageProcessor(
size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=lowerCAmelCase_ )
__lowercase : List[str] = prepare_img()
__lowercase : Optional[Any] = image_processor(images=lowerCAmelCase_ , return_tensors="""pt""" )
__lowercase : Optional[int] = encoding["""pixel_values"""]
__lowercase : str = model(lowerCAmelCase_ )
__lowercase : Tuple = outputs.logits
# verify logits
__lowercase : str = [1, 16] if """rvlcdip""" in checkpoint_url else [1, 196, 8192]
assert logits.shape == torch.Size(lowerCAmelCase_ ), "Shape of logits not as expected"
Path(lowerCAmelCase_ ).mkdir(exist_ok=lowerCAmelCase_ )
print(F"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(lowerCAmelCase_ )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(lowerCAmelCase_ )
if push_to_hub:
if has_lm_head:
__lowercase : Optional[Any] = """dit-base""" if """base""" in checkpoint_url else """dit-large"""
else:
__lowercase : Tuple = """dit-base-finetuned-rvlcdip""" if """dit-b""" in checkpoint_url else """dit-large-finetuned-rvlcdip"""
image_processor.push_to_hub(
repo_path_or_name=Path(lowerCAmelCase_ , lowerCAmelCase_ ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=lowerCAmelCase_ , )
model.push_to_hub(
repo_path_or_name=Path(lowerCAmelCase_ , lowerCAmelCase_ ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=lowerCAmelCase_ , )
if __name__ == "__main__":
lowerCamelCase : List[str] = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
lowerCamelCase : List[str] = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
lowerCamelCase : Tuple = logging.get_logger(__name__)
def snake_case_ ( lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str ):
return [
int(1000 * (box[0] / width) ),
int(1000 * (box[1] / height) ),
int(1000 * (box[2] / width) ),
int(1000 * (box[3] / height) ),
]
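# Worked example: the box (50, 50, 100, 100) in a 200x100 image normalizes to
# [250, 500, 500, 1000] on the 0-1000 scale LayoutLM-style models expect.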
def snake_case_ ( lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Optional[str] , lowerCAmelCase_ : Optional[str] = None ):
__lowercase : Any = tesseract_config if tesseract_config is not None else """"""
# apply OCR
__lowercase : Optional[Any] = to_pil_image(lowerCAmelCase_ )
__lowercase , __lowercase : Optional[Any] = pil_image.size
__lowercase : Union[str, Any] = pytesseract.image_to_data(lowerCAmelCase_ , lang=lowerCAmelCase_ , output_type="""dict""" , config=lowerCAmelCase_ )
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase : Any = data["""text"""], data["""left"""], data["""top"""], data["""width"""], data["""height"""]
# filter empty words and corresponding coordinates
__lowercase : int = [idx for idx, word in enumerate(lowerCAmelCase_ ) if not word.strip()]
__lowercase : Dict = [word for idx, word in enumerate(lowerCAmelCase_ ) if idx not in irrelevant_indices]
__lowercase : List[Any] = [coord for idx, coord in enumerate(lowerCAmelCase_ ) if idx not in irrelevant_indices]
__lowercase : Optional[Any] = [coord for idx, coord in enumerate(lowerCAmelCase_ ) if idx not in irrelevant_indices]
__lowercase : Dict = [coord for idx, coord in enumerate(lowerCAmelCase_ ) if idx not in irrelevant_indices]
__lowercase : Any = [coord for idx, coord in enumerate(lowerCAmelCase_ ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
__lowercase : int = []
for x, y, w, h in zip(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
__lowercase : Tuple = [x, y, x + w, y + h]
actual_boxes.append(lowerCAmelCase_ )
# finally, normalize the bounding boxes
__lowercase : int = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) )
assert len(lowerCAmelCase_ ) == len(lowerCAmelCase_ ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
class lowerCAmelCase ( __a ):
'''simple docstring'''
_A : Any = ['''pixel_values''']
def __init__( self : Optional[int] , __a : bool = True , __a : Dict[str, int] = None , __a : PILImageResampling = PILImageResampling.BILINEAR , __a : bool = True , __a : Optional[str] = None , __a : Optional[str] = "" , **__a : str , ) -> None:
"""simple docstring"""
super().__init__(**__a )
__lowercase : Union[str, Any] = size if size is not None else {"""height""": 224, """width""": 224}
__lowercase : Any = get_size_dict(__a )
__lowercase : List[Any] = do_resize
__lowercase : Optional[int] = size
__lowercase : Optional[int] = resample
__lowercase : Union[str, Any] = apply_ocr
__lowercase : Tuple = ocr_lang
__lowercase : Optional[Any] = tesseract_config
def lowerCAmelCase ( self : Tuple , __a : np.ndarray , __a : Dict[str, int] , __a : PILImageResampling = PILImageResampling.BILINEAR , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Union[str, Any] , ) -> np.ndarray:
"""simple docstring"""
__lowercase : Any = get_size_dict(__a )
if "height" not in size or "width" not in size:
raise ValueError(F"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}" )
__lowercase : Optional[Any] = (size["""height"""], size["""width"""])
return resize(__a , size=__a , resample=__a , data_format=__a , **__a )
def lowerCAmelCase ( self : Dict , __a : ImageInput , __a : bool = None , __a : Dict[str, int] = None , __a : PILImageResampling = None , __a : bool = None , __a : Optional[str] = None , __a : Optional[str] = None , __a : Optional[Union[str, TensorType]] = None , __a : ChannelDimension = ChannelDimension.FIRST , **__a : Any , ) -> PIL.Image.Image:
"""simple docstring"""
__lowercase : Optional[Any] = do_resize if do_resize is not None else self.do_resize
__lowercase : Union[str, Any] = size if size is not None else self.size
__lowercase : Tuple = get_size_dict(__a )
__lowercase : Tuple = resample if resample is not None else self.resample
__lowercase : Union[str, Any] = apply_ocr if apply_ocr is not None else self.apply_ocr
__lowercase : Optional[int] = ocr_lang if ocr_lang is not None else self.ocr_lang
__lowercase : str = tesseract_config if tesseract_config is not None else self.tesseract_config
__lowercase : Optional[Any] = make_list_of_images(__a )
if not valid_images(__a ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
# All transformations expect numpy arrays.
__lowercase : Dict = [to_numpy_array(__a ) for image in images]
if apply_ocr:
requires_backends(self , """pytesseract""" )
__lowercase : Union[str, Any] = []
__lowercase : Optional[int] = []
for image in images:
__lowercase , __lowercase : Optional[int] = apply_tesseract(__a , __a , __a )
words_batch.append(__a )
boxes_batch.append(__a )
if do_resize:
__lowercase : List[Any] = [self.resize(image=__a , size=__a , resample=__a ) for image in images]
# flip color channels from RGB to BGR (as Detectron2 requires this)
__lowercase : Any = [flip_channel_order(__a ) for image in images]
__lowercase : Any = [to_channel_dimension_format(__a , __a ) for image in images]
__lowercase : Any = BatchFeature(data={"""pixel_values""": images} , tensor_type=__a )
if apply_ocr:
__lowercase : Tuple = words_batch
__lowercase : int = boxes_batch
return data
from torch import nn
class lowerCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Tuple , __a : int , __a : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
__lowercase : int = class_size
__lowercase : int = embed_size
# self.mlp1 = nn.Linear(embed_size, embed_size)
# self.mlp2 = (nn.Linear(embed_size, class_size))
__lowercase : str = nn.Linear(__a , __a )
def lowerCAmelCase ( self : Tuple , __a : int ) -> Tuple:
"""simple docstring"""
__lowercase : str = self.mlp(__a )
return logits
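# A minimal usage sketch (class and argument names assumed; any sizes work):
#
#     head = ClassificationHead(class_size=5, embed_size=768)
#     logits = head(torch.randn(2, 768))   # -> shape (2, 5)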
from manim import *
class lowerCAmelCase ( __a ):
'''simple docstring'''
def lowerCAmelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
__lowercase : Optional[Any] = Rectangle(height=0.5 , width=0.5 )
__lowercase : Union[str, Any] = Rectangle(height=0.25 , width=0.25 )
__lowercase : List[str] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
__lowercase : List[Any] = [mem.copy() for i in range(6 )]
__lowercase : Optional[Any] = [mem.copy() for i in range(6 )]
__lowercase : str = VGroup(*__a ).arrange(__a , buff=0 )
__lowercase : Tuple = VGroup(*__a ).arrange(__a , buff=0 )
__lowercase : Union[str, Any] = VGroup(__a , __a ).arrange(__a , buff=0 )
__lowercase : List[Any] = Text("""CPU""" , font_size=24 )
__lowercase : Optional[int] = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__a )
__lowercase : str = [mem.copy() for i in range(4 )]
__lowercase : Optional[int] = VGroup(*__a ).arrange(__a , buff=0 )
__lowercase : List[str] = Text("""GPU""" , font_size=24 )
__lowercase : Any = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
gpu.move_to([-1, -1, 0] )
self.add(__a )
__lowercase : Tuple = [mem.copy() for i in range(6 )]
__lowercase : Dict = VGroup(*__a ).arrange(__a , buff=0 )
__lowercase : List[str] = Text("""Model""" , font_size=24 )
__lowercase : Any = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
model.move_to([3, -1.0, 0] )
self.add(__a )
__lowercase : List[str] = []
__lowercase : Dict = []
__lowercase : Tuple = []
for i, rect in enumerate(__a ):
rect.set_stroke(__a )
__lowercase : Any = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__a , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__a )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=__a , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=__a , buff=0.0 )
self.add(__a )
model_cpu_arr.append(__a )
self.add(*__a , *__a , *__a )
__lowercase : List[Any] = [mem.copy() for i in range(6 )]
__lowercase : Optional[Any] = VGroup(*__a ).arrange(__a , buff=0 )
__lowercase : str = Text("""Loaded Checkpoint""" , font_size=24 )
__lowercase : Dict = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
checkpoint.move_to([3, 0.5, 0] )
self.add(__a )
__lowercase : List[Any] = []
__lowercase : str = []
for i, rect in enumerate(__a ):
__lowercase : List[Any] = fill.copy().set_fill(__a , opacity=0.7 )
target.move_to(__a )
ckpt_arr.append(__a )
__lowercase : str = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(__a )
self.add(*__a , *__a )
__lowercase : Tuple = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
__lowercase : int = MarkupText(
F"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__a , __a )
__lowercase : Union[str, Any] = MarkupText(
F"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , )
blue_text.next_to(__a , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(__a )
__lowercase : Tuple = MarkupText(
F"Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device." , font_size=24 , )
step_a.move_to([2, 2, 0] )
__lowercase : Any = [meta_mem.copy() for i in range(6 )]
__lowercase : List[str] = [meta_mem.copy() for i in range(6 )]
__lowercase : List[Any] = VGroup(*__a ).arrange(__a , buff=0 )
__lowercase : List[Any] = VGroup(*__a ).arrange(__a , buff=0 )
__lowercase : List[Any] = VGroup(__a , __a ).arrange(__a , buff=0 )
__lowercase : Dict = Text("""Disk""" , font_size=24 )
__lowercase : Union[str, Any] = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(__a , run_time=3 ) , Write(__a , run_time=1 ) , Create(__a , run_time=1 ) )
__lowercase : Optional[int] = []
for i, rect in enumerate(__a ):
__lowercase : str = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(__a , run_time=1.5 ) )
self.play(*__a )
self.play(FadeOut(__a ) )
__lowercase : Dict = MarkupText(F"Then, the checkpoint is removed from memory\nthrough garbage collection." , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(__a , run_time=3 ) )
self.play(
FadeOut(__a , __a , *__a , *__a ) , )
self.wait()
import fire
from utils import calculate_rouge, save_json
def snake_case_ ( lowerCAmelCase_ : Any , lowerCAmelCase_ : str , lowerCAmelCase_ : str=None , **lowerCAmelCase_ : str ):
__lowercase : Tuple = [x.strip() for x in open(lowerCAmelCase_ ).readlines()]
__lowercase : Dict = [x.strip() for x in open(lowerCAmelCase_ ).readlines()][: len(lowerCAmelCase_ )]
__lowercase : Tuple = calculate_rouge(lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ )
if save_path is not None:
save_json(lowerCAmelCase_ , lowerCAmelCase_ , indent=lowerCAmelCase_ )
return metrics # these print nicely
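# Example invocation via fire (file name and paths are hypothetical):
#   python calculate_rouge_path.py preds.txt refs.txt --save_path rouge.json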
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def snake_case_ ( lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[int] ):
__lowercase : Optional[int] = []
for part_id in partition_order:
__lowercase : List[Any] = df.where(F"SPARK_PARTITION_ID() = {part_id}" ).collect()
for row_idx, row in enumerate(lowerCAmelCase_ ):
expected_row_ids_and_row_dicts.append((F"{part_id}_{row_idx}", row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def snake_case_ ( ):
__lowercase : Optional[int] = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
__lowercase : str = spark.range(100 ).repartition(1 )
__lowercase : str = Spark(lowerCAmelCase_ )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def snake_case_ ( ):
__lowercase : List[Any] = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
__lowercase : List[str] = spark.range(10 ).repartition(2 )
__lowercase : List[Any] = [1, 0]
__lowercase : Optional[int] = _generate_iterable_examples(lowerCAmelCase_ , lowerCAmelCase_ ) # Reverse the partitions.
__lowercase : Optional[int] = _get_expected_row_ids_and_row_dicts_for_partition_order(lowerCAmelCase_ , lowerCAmelCase_ )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
__lowercase , __lowercase : List[Any] = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def snake_case_ ( ):
__lowercase : List[Any] = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
__lowercase : Optional[Any] = spark.range(10 ).repartition(1 )
__lowercase : Dict = SparkExamplesIterable(lowerCAmelCase_ )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(lowerCAmelCase_ ):
assert row_id == F"0_{i}"
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def snake_case_ ( ):
__lowercase : Optional[Any] = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
__lowercase : Optional[Any] = spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch("""numpy.random.Generator""" ) as generator_mock:
        __lowercase : List[Any] = lambda x : x.reverse()
__lowercase : Dict = _get_expected_row_ids_and_row_dicts_for_partition_order(lowerCAmelCase_ , [2, 1, 0] )
__lowercase : str = SparkExamplesIterable(lowerCAmelCase_ ).shuffle_data_sources(lowerCAmelCase_ )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(lowerCAmelCase_ ):
__lowercase , __lowercase : List[str] = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def snake_case_ ( ):
__lowercase : Dict = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
__lowercase : Dict = spark.range(20 ).repartition(4 )
# Partitions 0 and 2
__lowercase : int = SparkExamplesIterable(lowerCAmelCase_ ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
__lowercase : Optional[Any] = _get_expected_row_ids_and_row_dicts_for_partition_order(lowerCAmelCase_ , [0, 2] )
for i, (row_id, row_dict) in enumerate(lowerCAmelCase_ ):
__lowercase , __lowercase : Optional[int] = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
__lowercase : List[str] = SparkExamplesIterable(lowerCAmelCase_ ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
__lowercase : List[str] = _get_expected_row_ids_and_row_dicts_for_partition_order(lowerCAmelCase_ , [1, 3] )
for i, (row_id, row_dict) in enumerate(lowerCAmelCase_ ):
__lowercase , __lowercase : Dict = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def snake_case_ ( ):
__lowercase : Tuple = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
__lowercase : Dict = spark.range(100 ).repartition(1 )
__lowercase : Tuple = Spark(lowerCAmelCase_ )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 100
| 649
|
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def snake_case_ ( lowerCAmelCase_ : Dict ):
return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class lowerCAmelCase ( __a ):
'''simple docstring'''
@staticmethod
def lowerCAmelCase ( __a : ArgumentParser ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = parser.add_parser("""download""" )
download_parser.add_argument(
"""--cache-dir""" , type=__a , default=__a , help="""Path to location to store the models""" )
download_parser.add_argument(
"""--force""" , action="""store_true""" , help="""Force the model to be download even if already in cache-dir""" )
download_parser.add_argument(
"""--trust-remote-code""" , action="""store_true""" , help="""Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine""" , )
download_parser.add_argument("""model""" , type=__a , help="""Name of the model to download""" )
download_parser.set_defaults(func=__a )
def __init__( self : Dict , __a : str , __a : str , __a : bool , __a : bool ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Dict = model
__lowercase : List[Any] = cache
__lowercase : Any = force
__lowercase : Optional[int] = trust_remote_code
def lowerCAmelCase ( self : str ) -> List[str]:
"""simple docstring"""
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : int = logging.get_logger(__name__)
lowerCamelCase : int = {
'''microsoft/wavlm-base''': '''https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json''',
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class lowerCAmelCase ( __a ):
'''simple docstring'''
_A : Union[str, Any] = '''wavlm'''
def __init__( self : str , __a : List[Any]=32 , __a : int=768 , __a : Optional[int]=12 , __a : Dict=12 , __a : Any=3072 , __a : List[Any]="gelu" , __a : Optional[int]=0.1 , __a : List[Any]=0.1 , __a : Optional[Any]=0.1 , __a : str=0.0 , __a : List[str]=0.1 , __a : List[str]=0.1 , __a : int=0.02 , __a : Optional[int]=1E-5 , __a : Optional[Any]="group" , __a : Dict="gelu" , __a : Any=(512, 512, 512, 512, 512, 512, 512) , __a : Dict=(5, 2, 2, 2, 2, 2, 2) , __a : Tuple=(10, 3, 3, 3, 3, 2, 2) , __a : Dict=False , __a : str=128 , __a : Optional[int]=16 , __a : List[Any]=320 , __a : Any=800 , __a : int=False , __a : List[str]=True , __a : Union[str, Any]=0.05 , __a : Optional[Any]=10 , __a : str=2 , __a : Dict=0.0 , __a : Optional[int]=10 , __a : List[str]=320 , __a : Dict=2 , __a : List[Any]=0.1 , __a : Optional[Any]=100 , __a : str=256 , __a : List[str]=256 , __a : str=0.1 , __a : Dict="mean" , __a : Optional[Any]=False , __a : Union[str, Any]=False , __a : Tuple=256 , __a : Optional[Any]=(512, 512, 512, 512, 1500) , __a : int=(5, 3, 3, 1, 1) , __a : List[str]=(1, 2, 3, 1, 1) , __a : int=512 , __a : str=80 , __a : Tuple=0 , __a : List[str]=1 , __a : Union[str, Any]=2 , __a : Tuple=False , __a : List[Any]=3 , __a : List[str]=2 , __a : Dict=3 , __a : int=None , **__a : List[Any] , ) -> Tuple:
"""simple docstring"""
super().__init__(**__a , pad_token_id=__a , bos_token_id=__a , eos_token_id=__a )
__lowercase : List[str] = hidden_size
__lowercase : Optional[int] = feat_extract_norm
__lowercase : Union[str, Any] = feat_extract_activation
__lowercase : Optional[Any] = list(__a )
__lowercase : Union[str, Any] = list(__a )
__lowercase : Union[str, Any] = list(__a )
__lowercase : List[str] = conv_bias
__lowercase : List[str] = num_buckets
__lowercase : int = max_bucket_distance
__lowercase : Union[str, Any] = num_conv_pos_embeddings
__lowercase : str = num_conv_pos_embedding_groups
__lowercase : List[str] = len(self.conv_dim )
__lowercase : str = num_hidden_layers
__lowercase : List[Any] = intermediate_size
__lowercase : Union[str, Any] = hidden_act
__lowercase : str = num_attention_heads
__lowercase : Union[str, Any] = hidden_dropout
__lowercase : Optional[Any] = attention_dropout
__lowercase : Tuple = activation_dropout
__lowercase : str = feat_proj_dropout
__lowercase : Dict = final_dropout
__lowercase : Optional[Any] = layerdrop
__lowercase : Any = layer_norm_eps
__lowercase : Any = initializer_range
__lowercase : Optional[Any] = num_ctc_classes
__lowercase : int = vocab_size
__lowercase : int = do_stable_layer_norm
__lowercase : int = use_weighted_layer_sum
__lowercase : Optional[int] = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
F" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"
F" `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__lowercase : List[Any] = apply_spec_augment
__lowercase : List[str] = mask_time_prob
__lowercase : str = mask_time_length
__lowercase : Optional[Any] = mask_time_min_masks
__lowercase : Optional[int] = mask_feature_prob
__lowercase : Any = mask_feature_length
# parameters for pretraining with codevector quantized representations
__lowercase : Any = num_codevectors_per_group
__lowercase : Dict = num_codevector_groups
__lowercase : Optional[int] = contrastive_logits_temperature
__lowercase : Optional[int] = num_negatives
__lowercase : str = codevector_dim
__lowercase : Dict = proj_codevector_dim
__lowercase : Tuple = diversity_loss_weight
# ctc loss
__lowercase : int = ctc_loss_reduction
__lowercase : Any = ctc_zero_infinity
# adapter
__lowercase : int = add_adapter
__lowercase : Union[str, Any] = adapter_kernel_size
__lowercase : Optional[int] = adapter_stride
__lowercase : Tuple = num_adapter_layers
__lowercase : List[Any] = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
__lowercase : List[Any] = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
__lowercase : Dict = list(__a )
__lowercase : str = list(__a )
__lowercase : List[Any] = list(__a )
__lowercase : Union[str, Any] = xvector_output_dim
@property
def lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
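    # Note: with the default conv_stride of (5, 2, 2, 2, 2, 2, 2) above, this
    # property evaluates to 5 * 2**6 = 320, i.e. the overall temporal
    # downsampling factor of the convolutional feature encoder.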
| 649
|
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1E-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
    '''simple docstring'''

    def __init__( self , parent , d_model=16 , batch_size=13 , prediction_length=7 , context_length=14 , label_length=10 , cardinality=19 , embedding_dimension=5 , num_time_features=4 , is_training=True , hidden_size=16 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=4 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , lags_sequence=[1, 2, 3, 4, 5] , moving_average=25 , autocorrelation_factor=5 , ) -> None:
        """simple docstring"""
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
    def get_config( self ):
"""simple docstring"""
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
    def prepare_autoformer_inputs_dict( self , config ):
"""simple docstring"""
        _past_length = config.context_length + max(config.lags_sequence )
        static_categorical_features = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
        past_values = floats_tensor([self.batch_size, _past_length] )
        past_observed_mask = floats_tensor([self.batch_size, _past_length] ) > 0.5
        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
        future_values = floats_tensor([self.batch_size, config.prediction_length] )
        inputs_dict = {
"""past_values""": past_values,
"""static_categorical_features""": static_categorical_features,
"""past_time_features""": past_time_features,
"""past_observed_mask""": past_observed_mask,
"""future_time_features""": future_time_features,
"""future_values""": future_values,
}
return inputs_dict
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config )
        return config, inputs_dict

    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_encoder_decoder_model_standalone( self , config , inputs_dict ):
        """simple docstring"""
        model = AutoformerModel(config=config ).to(torch_device ).eval()
        outputs = model(**inputs_dict )

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname )
            encoder = AutoformerEncoder.from_pretrained(tmpdirname ).to(torch_device )

        transformer_inputs, feature, _, _, _ = model.create_network_inputs(**inputs_dict )
        seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )

        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
        encoder_last_hidden_state_a = encoder(inputs_embeds=enc_input )[0]
        self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )

        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
            .unsqueeze(1 )
            .repeat(1 , config.prediction_length , 1 )
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )

        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
                feature[:, config.context_length - config.label_length :, ...],
            ) , dim=-1 , )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
                feature[:, config.context_length - config.label_length :, ...],
            ) , dim=-1 , )

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname )
            decoder = AutoformerDecoder.from_pretrained(tmpdirname ).to(torch_device )

        last_hidden_state_a = decoder(
            trend=trend_init , inputs_embeds=dec_input , encoder_hidden_states=encoder_last_hidden_state , )[0]
        self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class AutoformerModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {"""feature-extraction""": AutoformerModel} if is_torch_available() else {}
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = AutoformerModelTester(self )
        self.config_tester = ConfigTester(self , config_class=AutoformerConfig , has_text_modality=False )

    def test_config( self ):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_save_load_strict( self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config )

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname )
                model_a, info = model_class.from_pretrained(tmpdirname , output_loading_info=True )
            self.assertEqual(info["""missing_keys"""] , [] )
    def test_encoder_decoder_model_standalone( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs )
@unittest.skip(reason="""Model has no tokens embeddings""" )
    def test_resize_tokens_embeddings( self ):
"""simple docstring"""
pass
    def test_model_main_input_name( self ):
        """simple docstring"""
        model_signature = inspect.signature(getattr(AutoformerModel , """forward""" ) )
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys() )[1]
        self.assertEqual(AutoformerModel.main_input_name , observed_main_input_name )
    def test_forward_signature( self ):
        """simple docstring"""
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = [
                """past_values""",
                """past_time_features""",
                """past_observed_mask""",
                """static_categorical_features""",
                """static_real_features""",
                """future_values""",
                """future_time_features""",
            ]
            if model.__class__.__name__ in ["AutoformerForPrediction"]:
                expected_arg_names.append("""future_observed_mask""" )
            expected_arg_names.extend(
                [
                    """decoder_attention_mask""",
                    """head_mask""",
                    """decoder_head_mask""",
                    """cross_attn_head_mask""",
                    """encoder_outputs""",
                    """past_key_values""",
                    """output_hidden_states""",
                    """output_attentions""",
                    """use_cache""",
                    """return_dict""",
                ] )
            self.assertListEqual(arg_names[: len(expected_arg_names )] , expected_arg_names )
    def test_attention_outputs( self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        seq_len = getattr(self.model_tester , """seq_length""" , None )
        decoder_seq_length = getattr(self.model_tester , """decoder_seq_length""" , seq_len )
        encoder_seq_length = getattr(self.model_tester , """encoder_seq_length""" , seq_len )
        d_model = getattr(self.model_tester , """d_model""" , None )
        num_attention_heads = getattr(self.model_tester , """num_attention_heads""" , None )
        dim = d_model // num_attention_heads
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
            out_len = len(outputs )
            correct_outlen = 7
            if "last_hidden_state" in outputs:
                correct_outlen += 1
            if "trend" in outputs:
                correct_outlen += 1
            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned
            if "loss" in outputs:
                correct_outlen += 1
            if "params" in outputs:
                correct_outlen += 1
            self.assertEqual(out_len , correct_outlen )
            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions , (list, tuple) )
            self.assertEqual(len(decoder_attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
            # cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions , (list, tuple) )
            self.assertEqual(len(cross_attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            self.assertEqual(out_len + 2 , len(outputs ) )
            self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(self_attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
    def test_retain_grad_hidden_states_attentions( self ):
        """simple docstring"""
        super().test_retain_grad_hidden_states_attentions()
def prepare_batch( filename : str = """train-batch.pt""" ):
    file = hf_hub_download(repo_id="""hf-internal-testing/tourism-monthly-batch""" , filename=filename , repo_type="""dataset""" )
    batch = torch.load(file , map_location=torch_device )
    return batch
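# The downloaded .pt file holds a dict of tensors; the integration tests below
# read keys such as "past_values", "past_time_features", "past_observed_mask"
# and "static_categorical_features" from it.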
@require_torch
@slow
class AutoformerModelIntegrationTests( unittest.TestCase ):
'''simple docstring'''
    def test_inference_no_head( self ):
        """simple docstring"""
        model = AutoformerModel.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(torch_device )
        batch = prepare_batch()
        with torch.no_grad():
            output = model(
                past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , future_values=batch["""future_values"""] , future_time_features=batch["""future_time_features"""] , )[0]
        expected_shape = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] , device=torch_device )
        self.assertTrue(torch.allclose(output[0, :3, :3] , expected_slice , atol=TOLERANCE ) )
    def test_inference_head( self ):
        """simple docstring"""
        model = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(torch_device )
        batch = prepare_batch("""val-batch.pt""" )
        with torch.no_grad():
            output = model(
                past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , ).encoder_last_hidden_state
        expected_shape = torch.Size((64, model.config.context_length, model.config.d_model) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] , device=torch_device )
        self.assertTrue(torch.allclose(output[0, :3, :3] , expected_slice , atol=TOLERANCE ) )
    def test_seq_to_seq_generation( self ):
        """simple docstring"""
        model = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(torch_device )
        batch = prepare_batch("""val-batch.pt""" )
        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch["""static_categorical_features"""] , past_time_features=batch["""past_time_features"""] , past_values=batch["""past_values"""] , future_time_features=batch["""future_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , )
        expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
        self.assertEqual(outputs.sequences.shape , expected_shape )
        expected_slice = torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=torch_device )
        mean_prediction = outputs.sequences.mean(dim=1 )
        self.assertTrue(torch.allclose(mean_prediction[0, -3:] , expected_slice , rtol=1E-1 ) )
| 649
| 1
|
def circle_sort( collection : list ):
    """Sort a list in place with the circle sort algorithm and return it."""
    if len(collection ) < 2:
        return collection

    def circle_sort_util(collection : list , low : int , high : int ) -> bool:
        """Compare/swap across the [low, high] segment; return True if anything moved."""
        swapped = False
        if low == high:
            return swapped
        left = low
        right = high
        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True
            left += 1
            right -= 1
        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True
        mid = low + int((high - low) / 2 )
        left_swap = circle_sort_util(collection , low , mid )
        right_swap = circle_sort_util(collection , mid + 1 , high )
        return swapped or left_swap or right_swap

    is_not_sorted = True
    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection , 0 , len(collection ) - 1 )
    return collection
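# A minimal sanity check, kept as a comment so importing this module stays
# side-effect free (the sample list is illustrative, not from the original script):
#
#     sample = [5, 2, 9, 1, 5, 6]
#     assert circle_sort(sample) == sorted([5, 2, 9, 1, 5, 6])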
if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
print(circle_sort(unsorted))
| 649
|
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class ScoreSdeVpScheduler( SchedulerMixin , ConfigMixin ):
    '''simple docstring'''

    order = 1

    @register_to_config
    def __init__( self , num_train_timesteps=2000 , beta_min=0.1 , beta_max=20 , sampling_eps=1E-3 ) -> None:
        """simple docstring"""
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps( self , num_inference_steps , device: Union[str, torch.device] = None ) -> None:
        """simple docstring"""
        self.timesteps = torch.linspace(1 , self.config.sampling_eps , num_inference_steps , device=device )

    def step_pred( self , score , x , t , generator=None ):
        """simple docstring"""
        if self.timesteps is None:
            raise ValueError(
                """`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" )
        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
        std = std.flatten()
        while len(std.shape ) < len(score.shape ):
            std = std.unsqueeze(-1 )
        score = -score / std
        # compute the per-step increment from the number of timesteps
        dt = -1.0 / len(self.timesteps )
        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape ) < len(x.shape ):
            beta_t = beta_t.unsqueeze(-1 )
        drift = -0.5 * beta_t * x
        diffusion = torch.sqrt(beta_t )
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt
        # add noise
        noise = randn_tensor(x.shape , layout=x.layout , generator=generator , device=x.device , dtype=x.dtype )
        x = x_mean + diffusion * math.sqrt(-dt ) * noise
        return x, x_mean
def __len__( self : Tuple ) -> Optional[int]:
"""simple docstring"""
return self.config.num_train_timesteps
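# A minimal usage sketch (the tensor shape and the zero "score" stand-in are
# illustrative assumptions, not taken from the library's documented examples):
#
#     scheduler = ScoreSdeVpScheduler()
#     scheduler.set_timesteps(num_inference_steps=10)
#     x = torch.randn(1, 3, 8, 8)
#     for t in scheduler.timesteps:
#         score = torch.zeros_like(x)  # stand-in for a learned score model
#         x, x_mean = scheduler.step_pred(score, x, t)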
| 649
| 1
|
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 2048-bit
14: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AACAA68FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 3072-bit
15: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 4096-bit
16: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'''
+ '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'''
+ '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'''
+ '''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'''
+ '''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'''
+ '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199'''
+ '''FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 6144-bit
17: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08'''
+ '''8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B'''
+ '''302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9'''
+ '''A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6'''
+ '''49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8'''
+ '''FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C'''
+ '''180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718'''
+ '''3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D'''
+ '''04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D'''
+ '''B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226'''
+ '''1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC'''
+ '''E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26'''
+ '''99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB'''
+ '''04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2'''
+ '''233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127'''
+ '''D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'''
+ '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406'''
+ '''AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918'''
+ '''DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151'''
+ '''2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03'''
+ '''F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F'''
+ '''BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'''
+ '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B'''
+ '''B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632'''
+ '''387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E'''
+ '''6DCC4024FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 8192-bit
18: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'''
+ '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'''
+ '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'''
+ '''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'''
+ '''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'''
+ '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'''
+ '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD'''
+ '''F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831'''
+ '''179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B'''
+ '''DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF'''
+ '''5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6'''
+ '''D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3'''
+ '''23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'''
+ '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328'''
+ '''06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C'''
+ '''DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE'''
+ '''12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4'''
+ '''38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300'''
+ '''741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568'''
+ '''3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9'''
+ '''22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B'''
+ '''4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A'''
+ '''062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36'''
+ '''4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1'''
+ '''B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92'''
+ '''4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47'''
+ '''9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71'''
+ '''60C980DD98EDD3DFFFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
}
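# The table above maps RFC 3526 group ids (5, 14, 15, 16, 17, 18) to their
# 1536-, 2048-, 3072-, 4096-, 6144- and 8192-bit MODP primes; every group
# uses 2 as the generator.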
class DiffieHellman:
    '''Class to perform the Diffie-Hellman key exchange over a chosen MODP group.'''

    def __init__( self , group : int = 14 ) -> None:
        """Pick the group parameters and draw a random 256-bit private key."""
        if group not in primes:
            raise ValueError("""Unsupported Group""" )
        self.prime = primes[group]["""prime"""]
        self.generator = primes[group]["""generator"""]
        self.__private_key = int(hexlify(urandom(32 ) ) , base=16 )

    def get_private_key( self ) -> str:
        """Return the private key as a hex string (without the 0x prefix)."""
        return hex(self.__private_key )[2:]

    def generate_public_key( self ) -> str:
        """Return generator ** private_key mod prime as a hex string."""
        public_key = pow(self.generator , self.__private_key , self.prime )
        return hex(public_key )[2:]

    def is_valid_public_key( self , key : int ) -> bool:
        """Check that the key lies in [2, prime - 2] and has the expected order."""
        return (
            2 <= key <= self.prime - 2
            and pow(key , (self.prime - 1) // 2 , self.prime ) == 1
        )

    def generate_shared_key( self , other_key_str : str ) -> str:
        """Derive the shared secret from the other party's public key and hash it."""
        other_key = int(other_key_str , base=16 )
        if not self.is_valid_public_key(other_key ):
            raise ValueError("""Invalid public key""" )
        shared_key = pow(other_key , self.__private_key , self.prime )
        return sha256(str(shared_key ).encode() ).hexdigest()

    @staticmethod
    def is_valid_public_key_static( remote_public_key_str : int , prime : int ) -> bool:
        """Static variant of the public-key validity check."""
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(remote_public_key_str , (prime - 1) // 2 , prime ) == 1
        )

    @staticmethod
    def generate_shared_key_static( local_private_key_str : str , remote_public_key_str : str , group : int = 14 ) -> str:
        """Derive the shared secret from hex-encoded keys without an instance."""
        local_private_key = int(local_private_key_str , base=16 )
        remote_public_key = int(remote_public_key_str , base=16 )
        prime = primes[group]["""prime"""]
        if not DiffieHellman.is_valid_public_key_static(remote_public_key , prime ):
            raise ValueError("""Invalid public key""" )
        shared_key = pow(remote_public_key , local_private_key , prime )
        return sha256(str(shared_key ).encode() ).hexdigest()
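# A minimal usage sketch (the variable names are illustrative; both parties
# must agree on the same MODP group for the exchange to succeed):
#
#     alice = DiffieHellman(group=14)
#     bob = DiffieHellman(group=14)
#     shared_a = alice.generate_shared_key(bob.generate_public_key())
#     shared_b = bob.generate_shared_key(alice.generate_public_key())
#     assert shared_a == shared_b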
if __name__ == "__main__":
import doctest
doctest.testmod()
| 649
|
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LongformerTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''

    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True
    def setUp( self ):
        """simple docstring"""
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            """l""",
            """o""",
            """w""",
            """e""",
            """r""",
            """s""",
            """t""",
            """i""",
            """d""",
            """n""",
            """\u0120""",
            """\u0120l""",
            """\u0120n""",
            """\u0120lo""",
            """\u0120low""",
            """er""",
            """\u0120lowest""",
            """\u0120newer""",
            """\u0120wider""",
            """<unk>""",
        ]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
        self.special_tokens_map = {"""unk_token""": """<unk>"""}

        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + """\n""" )
        with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write("""\n""".join(merges ) )
    def get_tokenizer( self , **kwargs ):
        """simple docstring"""
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )

    def get_rust_tokenizer( self , **kwargs ):
        """simple docstring"""
        kwargs.update(self.special_tokens_map )
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        """simple docstring"""
        input_text = """lower newer"""
        output_text = """lower newer"""
        return input_text, output_text
    def test_full_tokenizer( self ):
        """simple docstring"""
        tokenizer = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = """lower newer"""
        bpe_tokens = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
        tokens = tokenizer.tokenize(text )  # , add_prefix_space=True)
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
def lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : Optional[Any] = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("""Hello world!""" , add_special_tokens=__a ) , [0, 31414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode("""Hello world! cécé herlolip 418""" , add_special_tokens=__a ) , [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] , )
@slow
def lowerCAmelCase ( self : Tuple ) -> str:
"""simple docstring"""
__lowercase : Any = self.tokenizer_class.from_pretrained("""allenai/longformer-base-4096""" )
__lowercase : Optional[Any] = tokenizer.encode("""sequence builders""" , add_special_tokens=__a )
__lowercase : List[str] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=__a )
__lowercase : Optional[Any] = tokenizer.encode(
"""sequence builders""" , add_special_tokens=__a , add_prefix_space=__a )
__lowercase : Union[str, Any] = tokenizer.encode(
"""sequence builders""" , """multi-sequence build""" , add_special_tokens=__a , add_prefix_space=__a )
__lowercase : List[Any] = tokenizer.build_inputs_with_special_tokens(__a )
__lowercase : Any = tokenizer.build_inputs_with_special_tokens(__a , __a )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def lowerCAmelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
__lowercase : Optional[Any] = self.get_tokenizer()
__lowercase : Tuple = """Encode this sequence."""
__lowercase : Optional[Any] = tokenizer.byte_encoder[""" """.encode("""utf-8""" )[0]]
# Testing encoder arguments
__lowercase : Dict = tokenizer.encode(__a , add_special_tokens=__a , add_prefix_space=__a )
__lowercase : Tuple = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__a , __a )
__lowercase : List[str] = tokenizer.encode(__a , add_special_tokens=__a , add_prefix_space=__a )
__lowercase : Any = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__a , __a )
tokenizer.add_special_tokens({"""bos_token""": """<s>"""} )
__lowercase : str = tokenizer.encode(__a , add_special_tokens=__a )
__lowercase : Dict = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__a , __a )
# Testing spaces after special tokens
__lowercase : List[Any] = """<mask>"""
tokenizer.add_special_tokens(
{"""mask_token""": AddedToken(__a , lstrip=__a , rstrip=__a )} ) # mask token has a left space
__lowercase : Dict = tokenizer.convert_tokens_to_ids(__a )
__lowercase : List[str] = """Encode <mask> sequence"""
__lowercase : List[str] = """Encode <mask>sequence"""
__lowercase : Union[str, Any] = tokenizer.encode(__a )
__lowercase : Dict = encoded.index(__a )
__lowercase : List[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__a , __a )
__lowercase : int = tokenizer.encode(__a )
__lowercase : Union[str, Any] = encoded.index(__a )
__lowercase : List[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__a , __a )
def lowerCAmelCase ( self : int ) -> str:
"""simple docstring"""
pass
def lowerCAmelCase ( self : int ) -> Dict:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
__lowercase : List[str] = self.rust_tokenizer_class.from_pretrained(__a , **__a )
__lowercase : List[Any] = self.tokenizer_class.from_pretrained(__a , **__a )
__lowercase : Optional[Any] = """A, <mask> AllenNLP sentence."""
__lowercase : Union[str, Any] = tokenizer_r.encode_plus(__a , add_special_tokens=__a , return_token_type_ids=__a )
__lowercase : Optional[Any] = tokenizer_p.encode_plus(__a , add_special_tokens=__a , return_token_type_ids=__a )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
__lowercase : Dict = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
__lowercase : str = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
__a , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
__a , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
def lowerCAmelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
__lowercase : Dict = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : Optional[int] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
__lowercase : Any = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] , __a )
self.assertEqual(post_processor_state["""add_prefix_space"""] , __a )
self.assertEqual(post_processor_state["""trim_offsets"""] , __a )
def lowerCAmelCase ( self : int ) -> Tuple:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
                text_of_1_token = """hello"""  # `hello` is a token in the vocabulary of `pretrained_name`
                text = F"{text_of_1_token} {text_of_1_token}"
__lowercase : List[str] = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : Any = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__a ) + 1, len(__a ) + 1 + len(__a )) , )
__lowercase : str = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : Tuple = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__a ) + 1, len(__a ) + 1 + len(__a )) , )
__lowercase : Optional[int] = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : str = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__a ), len(__a ) + 1 + len(__a )) , )
__lowercase : str = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : int = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__a ), len(__a ) + 1 + len(__a )) , )
                text = F" {text}"
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
__lowercase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : str = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__a ) + 1, 1 + len(__a ) + 1 + len(__a )) , )
__lowercase : int = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : Dict = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__a ), 1 + len(__a ) + 1 + len(__a )) , )
__lowercase : int = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : Tuple = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__a ), 1 + len(__a ) + 1 + len(__a )) , )
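    # A minimal illustration of the behavior exercised above (token ids omitted;
    # only the offsets matter): with trim_offsets=True, encoding "hello hello"
    # yields offset_mapping [(0, 5), (6, 11)], i.e. the leading space is
    # excluded from each token's character span.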
| 649
| 1
|
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-classification/requirements.txt''')
MODEL_CONFIG_CLASSES = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def pil_loader( path : str ):
    with open(path , """rb""" ) as f:
        im = Image.open(f )
        return im.convert("""RGB""" )
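# Converting to "RGB" normalizes grayscale, palette and RGBA inputs to the
# three channels expected by the image processor.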
@dataclass
class DataTrainingArguments:
    '''simple docstring'''

    dataset_name: Optional[str] = field(
        default=None , metadata={
            '''help''': '''Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub).'''
        } , )
    dataset_config_name: Optional[str] = field(
        default=None , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
    train_dir: Optional[str] = field(default=None , metadata={'''help''': '''A folder containing the training data.'''} )
    validation_dir: Optional[str] = field(default=None , metadata={'''help''': '''A folder containing the validation data.'''} )
    train_val_split: Optional[float] = field(
        default=0.15 , metadata={'''help''': '''Percent to split off of train for validation.'''} )
    max_train_samples: Optional[int] = field(
        default=None , metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of training examples to this '''
                '''value if set.'''
            )
        } , )
    max_eval_samples: Optional[int] = field(
        default=None , metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
                '''value if set.'''
            )
        } , )

    def __post_init__( self ):
        """simple docstring"""
        if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
            raise ValueError(
                """You must specify either a dataset name from the hub or a train and/or validation directory.""" )
@dataclass
class ModelArguments:
    '''simple docstring'''

    model_name_or_path: str = field(
        default='''google/vit-base-patch16-224-in21k''' , metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} , )
    model_type: Optional[str] = field(
        default=None , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(MODEL_TYPES )} , )
    config_name: Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    cache_dir: Optional[str] = field(
        default=None , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from s3'''} )
    model_revision: str = field(
        default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
    image_processor_name: str = field(default=None , metadata={'''help''': '''Name or path of preprocessor config.'''} )
    use_auth_token: bool = field(
        default=False , metadata={
            '''help''': (
                '''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
                '''with private models).'''
            )
        } , )
    ignore_mismatched_sizes: bool = field(
        default=False , metadata={'''help''': '''Will enable to load a pretrained model whose head dimensions are different.'''} , )
def collate_fn( examples ):
    pixel_values = torch.stack([example["""pixel_values"""] for example in examples] )
    labels = torch.tensor([example["""labels"""] for example in examples] )
    return {"pixel_values": pixel_values, "labels": labels}
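# collate_fn turns a list of per-example dicts into batched tensors:
# pixel_values becomes (batch_size, channels, height, width) and labels
# becomes (batch_size,).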
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("""run_image_classification""" , model_args , data_args )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}" )
logger.info(F"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
if data_args.dataset_name is not None:
        dataset = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir , task="""image-classification""" , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        data_files = {}
        if data_args.train_dir is not None:
            data_files["""train"""] = os.path.join(data_args.train_dir , """**""" )
        if data_args.validation_dir is not None:
            data_files["""validation"""] = os.path.join(data_args.validation_dir , """**""" )
        dataset = load_dataset(
            """imagefolder""" , data_files=data_files , cache_dir=model_args.cache_dir , task="""image-classification""" , )
    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if """validation""" in dataset.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split , float ) and data_args.train_val_split > 0.0:
        split = dataset["""train"""].train_test_split(data_args.train_val_split )
        dataset["""train"""] = split["""train"""]
        dataset["""validation"""] = split["""test"""]
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
    labels = dataset["""train"""].features["""labels"""].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels ):
        label2id[label] = str(i )
        id2label[str(i )] = label
# Load the accuracy metric from the datasets package
    metric = evaluate.load("""accuracy""" )

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p ):
        return metric.compute(predictions=np.argmax(p.predictions , axis=1 ) , references=p.label_ids )

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path , num_labels=len(labels ) , label2id=label2id , id2label=id2label , finetuning_task="""image-classification""" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    model = AutoModelForImageClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
    image_processor = AutoImageProcessor.from_pretrained(
        model_args.image_processor_name or model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
        size = image_processor.size["""shortest_edge"""]
    else:
        size = (image_processor.size["""height"""], image_processor.size["""width"""])
    normalize = Normalize(mean=image_processor.image_mean , std=image_processor.image_std )
    _train_transforms = Compose(
        [
            RandomResizedCrop(size ),
            RandomHorizontalFlip(),
            ToTensor(),
            normalize,
        ] )
    _val_transforms = Compose(
        [
            Resize(size ),
            CenterCrop(size ),
            ToTensor(),
            normalize,
        ] )
    def train_transforms(example_batch ):
        example_batch["""pixel_values"""] = [
            _train_transforms(pil_img.convert("""RGB""" ) ) for pil_img in example_batch["""image"""]
        ]
        return example_batch

    def val_transforms(example_batch ):
        example_batch["""pixel_values"""] = [_val_transforms(pil_img.convert("""RGB""" ) ) for pil_img in example_batch["""image"""]]
        return example_batch
if training_args.do_train:
if "train" not in dataset:
raise ValueError("""--do_train requires a train dataset""" )
if data_args.max_train_samples is not None:
            dataset["""train"""] = (
                dataset["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
            )
        # Set the training transforms
        dataset["""train"""].set_transform(train_transforms )
if training_args.do_eval:
if "validation" not in dataset:
raise ValueError("""--do_eval requires a validation dataset""" )
if data_args.max_eval_samples is not None:
            dataset["""validation"""] = (
                dataset["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
        # Set the validation transforms
        dataset["""validation"""].set_transform(val_transforms )
    # Initialize our trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=dataset["""train"""] if training_args.do_train else None , eval_dataset=dataset["""validation"""] if training_args.do_eval else None , compute_metrics=compute_metrics , tokenizer=image_processor , data_collator=collate_fn , )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
trainer.save_model()
trainer.log_metrics("""train""" , train_result.metrics )
trainer.save_metrics("""train""" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("""eval""" , metrics )
        trainer.save_metrics("""eval""" , metrics )
# Write model card and (optionally) push to hub
    kwargs = {
        """finetuned_from""": model_args.model_name_or_path,
        """tasks""": """image-classification""",
        """dataset""": data_args.dataset_name,
        """tags""": ["""image-classification""", """vision"""],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
if __name__ == "__main__":
main()
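# Example invocation (the dataset name, output directory and flags below are
# illustrative, not prescribed by this script):
#
#   python run_image_classification.py \
#       --dataset_name beans \
#       --output_dir ./beans_outputs \
#       --do_train \
#       --do_eval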
| 649
|
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    '''simple docstring'''

    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
"""simple docstring"""
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = """gelu"""
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
def lowerCAmelCase ( self : str ) -> List[Any]:
"""simple docstring"""
__lowercase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase : Optional[Any] = None
if self.use_input_mask:
__lowercase : Dict = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase : Dict = None
if self.use_token_type_ids:
__lowercase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowercase : Optional[Any] = None
__lowercase : str = None
__lowercase : Tuple = None
if self.use_labels:
__lowercase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase : str = ids_tensor([self.batch_size] , self.num_choices )
__lowercase : Optional[int] = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__a , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase ( self : Dict , __a : List[Any] , __a : List[str] , __a : Union[str, Any] , __a : str , __a : Union[str, Any] , __a : Tuple , __a : Tuple ) -> Dict:
"""simple docstring"""
__lowercase : Dict = TFConvBertModel(config=__a )
__lowercase : Tuple = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
__lowercase : Any = [input_ids, input_mask]
__lowercase : Dict = model(__a )
__lowercase : str = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase ( self : Tuple , __a : Union[str, Any] , __a : Any , __a : Tuple , __a : Union[str, Any] , __a : str , __a : Dict , __a : str ) -> Dict:
"""simple docstring"""
__lowercase : Optional[int] = TFConvBertForMaskedLM(config=__a )
__lowercase : List[Any] = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
__lowercase : Any = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase ( self : Optional[int] , __a : int , __a : Any , __a : Optional[int] , __a : int , __a : int , __a : List[Any] , __a : Optional[int] ) -> List[Any]:
"""simple docstring"""
__lowercase : str = self.num_labels
__lowercase : List[Any] = TFConvBertForSequenceClassification(config=__a )
__lowercase : int = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
__lowercase : List[str] = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase ( self : Optional[int] , __a : Any , __a : Optional[Any] , __a : int , __a : Optional[int] , __a : Tuple , __a : int , __a : int ) -> Dict:
"""simple docstring"""
__lowercase : Tuple = self.num_choices
__lowercase : Dict = TFConvBertForMultipleChoice(config=__a )
__lowercase : List[str] = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
__lowercase : int = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
__lowercase : str = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
__lowercase : str = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
__lowercase : Dict = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCAmelCase ( self : List[str] , __a : str , __a : List[str] , __a : List[str] , __a : List[str] , __a : Any , __a : Tuple , __a : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase : Tuple = self.num_labels
__lowercase : Tuple = TFConvBertForTokenClassification(config=__a )
__lowercase : Dict = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
__lowercase : str = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase ( self : List[Any] , __a : Optional[int] , __a : List[str] , __a : Optional[Any] , __a : int , __a : Tuple , __a : Any , __a : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : Any = TFConvBertForQuestionAnswering(config=__a )
__lowercase : str = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
__lowercase : List[Any] = model(__a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase ( self : Tuple ) -> Tuple:
"""simple docstring"""
__lowercase : Tuple = self.prepare_config_and_inputs()
        (__lowercase, __lowercase, __lowercase, __lowercase, __lowercase, __lowercase, __lowercase) : int = config_and_inputs
__lowercase : Union[str, Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class lowerCAmelCase ( __a , __a , unittest.TestCase ):
'''simple docstring'''
_A : Dict = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
_A : str = (
{
'''feature-extraction''': TFConvBertModel,
'''fill-mask''': TFConvBertForMaskedLM,
'''question-answering''': TFConvBertForQuestionAnswering,
'''text-classification''': TFConvBertForSequenceClassification,
'''token-classification''': TFConvBertForTokenClassification,
'''zero-shot''': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
_A : Union[str, Any] = False
_A : List[str] = False
_A : Dict = False
def lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
__lowercase : int = TFConvBertModelTester(self )
__lowercase : Tuple = ConfigTester(self , config_class=__a , hidden_size=37 )
def lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def lowerCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__a )
def lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__a )
def lowerCAmelCase ( self : str ) -> Any:
"""simple docstring"""
__lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__a )
def lowerCAmelCase ( self : str ) -> str:
"""simple docstring"""
__lowercase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__a )
def lowerCAmelCase ( self : str ) -> Any:
"""simple docstring"""
__lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__a )
@slow
def lowerCAmelCase ( self : str ) -> Any:
"""simple docstring"""
__lowercase , __lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase : Union[str, Any] = True
__lowercase : List[Any] = True
if hasattr(__a , """use_cache""" ):
__lowercase : Optional[Any] = True
__lowercase : List[str] = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
__lowercase : int = getattr(self.model_tester , """key_length""" , __a )
for model_class in self.all_model_classes:
__lowercase : Optional[Any] = self._prepare_for_class(__a , __a )
__lowercase : Tuple = model_class(__a )
__lowercase : Tuple = len(model(__a ) )
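            # export to a TF SavedModel, reload it with Keras, and check that hidden states
            # and attentions survive the round trip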
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__a , saved_model=__a )
__lowercase : List[Any] = os.path.join(__a , """saved_model""" , """1""" )
__lowercase : str = tf.keras.models.load_model(__a )
__lowercase : Optional[int] = model(__a )
if self.is_encoder_decoder:
__lowercase : Union[str, Any] = outputs["""encoder_hidden_states"""]
__lowercase : Union[str, Any] = outputs["""encoder_attentions"""]
else:
__lowercase : Union[str, Any] = outputs["""hidden_states"""]
__lowercase : List[str] = outputs["""attentions"""]
self.assertEqual(len(__a ) , __a )
__lowercase : List[Any] = getattr(
self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(__a ) , __a )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : str = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" )
self.assertIsNotNone(__a )
def lowerCAmelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
__lowercase , __lowercase : int = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase : List[str] = True
__lowercase : List[Any] = getattr(self.model_tester , """decoder_seq_length""" , self.model_tester.seq_length )
__lowercase : Optional[int] = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
__lowercase : List[str] = getattr(self.model_tester , """key_length""" , __a )
__lowercase : List[Any] = getattr(self.model_tester , """key_length""" , __a )
def check_decoder_attentions_output(__a : List[str] ):
__lowercase : Union[str, Any] = len(__a )
self.assertEqual(out_len % 2 , 0 )
__lowercase : Any = outputs.decoder_attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(__a : str ):
__lowercase : str = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
__lowercase : int = True
__lowercase : Any = False
__lowercase : List[Any] = model_class(__a )
__lowercase : Tuple = model(self._prepare_for_class(__a , __a ) )
__lowercase : Dict = len(__a )
self.assertEqual(config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
if self.is_encoder_decoder:
__lowercase : Any = model_class(__a )
__lowercase : List[str] = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(config.output_hidden_states , __a )
check_decoder_attentions_output(__a )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
__lowercase : Dict = True
__lowercase : Optional[Any] = model_class(__a )
__lowercase : Optional[int] = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
# Check attention is always last and order is fine
__lowercase : List[str] = True
__lowercase : List[Any] = True
__lowercase : Any = model_class(__a )
__lowercase : Optional[int] = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__a ) )
self.assertEqual(model.config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
@require_tf
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : List[str] = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" )
__lowercase : str = tf.constant([[0, 1, 2, 3, 4, 5]] )
__lowercase : Tuple = model(__a )[0]
__lowercase : Any = [1, 6, 768]
self.assertEqual(output.shape , __a )
__lowercase : Optional[Any] = tf.constant(
[
[
[-0.03475493, -0.4686034, -0.30638832],
[0.22637248, -0.26988646, -0.7423424],
[0.10324868, -0.45013508, -0.58280784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __a , atol=1E-4 )
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError('''To use the rich extension, install rich with `pip install rich`''')
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
class lowerCAmelCase ( __a ):
'''simple docstring'''
def __init__( self : int , *__a : Dict , **__a : Optional[Any] ) -> None:
"""simple docstring"""
warnings.warn(
"""The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use BeitImageProcessor instead.""" , __a , )
super().__init__(*__a , **__a )
def bead_sort(sequence: list) -> list:
    # Bead sort ("gravity sort"): repeatedly let excess beads fall from each rod
    # onto the shorter rod below it until the sequence is non-decreasing.
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError("Sequence must be a list of non-negative integers")
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
__lowercase : Optional[int] = """| <pad> <unk> <s> </s> a b c d e f g h i j k""".split()
__lowercase : List[str] = dict(zip(__a , range(len(__a ) ) ) )
__lowercase : Dict = {
"""unk_token""": """<unk>""",
"""bos_token""": """<s>""",
"""eos_token""": """</s>""",
}
__lowercase : List[str] = {
"""feature_size""": 1,
"""padding_value""": 0.0,
"""sampling_rate""": 16000,
"""return_attention_mask""": False,
"""do_normalize""": True,
}
__lowercase : Tuple = tempfile.mkdtemp()
__lowercase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__lowercase : str = os.path.join(self.tmpdirname , __a )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__a ) + """\n""" )
with open(self.feature_extraction_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__a ) + """\n""" )
# load decoder from hub
__lowercase : Optional[int] = """hf-internal-testing/ngram-beam-search-decoder"""
def lowerCAmelCase ( self : Optional[Any] , **__a : Dict ) -> Tuple:
"""simple docstring"""
__lowercase : Union[str, Any] = self.add_kwargs_tokens_map.copy()
kwargs.update(__a )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **__a )
def lowerCAmelCase ( self : str , **__a : int ) -> Tuple:
"""simple docstring"""
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **__a )
def lowerCAmelCase ( self : Union[str, Any] , **__a : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **__a )
def lowerCAmelCase ( self : int ) -> Tuple:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def lowerCAmelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
__lowercase : Optional[Any] = self.get_tokenizer()
__lowercase : Any = self.get_feature_extractor()
__lowercase : str = self.get_decoder()
__lowercase : Tuple = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
processor.save_pretrained(self.tmpdirname )
__lowercase : Tuple = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , __a )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __a )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , __a )
def lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Any = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
        # load the processor back, overriding some decoder parameters
__lowercase : str = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def lowerCAmelCase ( self : Any ) -> List[str]:
"""simple docstring"""
__lowercase : List[str] = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(["""xx"""] )
with self.assertRaisesRegex(__a , """include""" ):
WavaVecaProcessorWithLM(
tokenizer=__a , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def lowerCAmelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__lowercase : List[Any] = self.get_feature_extractor()
__lowercase : Union[str, Any] = self.get_tokenizer()
__lowercase : int = self.get_decoder()
__lowercase : int = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
__lowercase : Optional[int] = floats_list((3, 1000) )
__lowercase : List[Any] = feature_extractor(__a , return_tensors="""np""" )
__lowercase : List[str] = processor(__a , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase : List[Any] = self.get_feature_extractor()
__lowercase : int = self.get_tokenizer()
__lowercase : Dict = self.get_decoder()
__lowercase : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
__lowercase : Dict = """This is a test string"""
__lowercase : Any = processor(text=__a )
__lowercase : Dict = tokenizer(__a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCAmelCase ( self : str , __a : Tuple=(2, 10, 16) , __a : int=77 ) -> Optional[Any]:
"""simple docstring"""
np.random.seed(__a )
return np.random.rand(*__a )
def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : List[str] = self.get_feature_extractor()
__lowercase : Dict = self.get_tokenizer()
__lowercase : str = self.get_decoder()
__lowercase : int = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
__lowercase : List[str] = self._get_dummy_logits(shape=(10, 16) , seed=13 )
__lowercase : Optional[Any] = processor.decode(__a )
__lowercase : Any = decoder.decode_beams(__a )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual("""</s> <s> </s>""" , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ["""fork"""], ["""spawn"""]] )
def lowerCAmelCase ( self : List[str] , __a : Dict ) -> List[Any]:
"""simple docstring"""
__lowercase : str = self.get_feature_extractor()
__lowercase : Dict = self.get_tokenizer()
__lowercase : Optional[int] = self.get_decoder()
__lowercase : Any = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
__lowercase : Optional[Any] = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
__lowercase : Union[str, Any] = processor.batch_decode(__a )
else:
with get_context(__a ).Pool() as pool:
__lowercase : Optional[Any] = processor.batch_decode(__a , __a )
__lowercase : Union[str, Any] = list(__a )
with get_context("""fork""" ).Pool() as p:
__lowercase : Optional[Any] = decoder.decode_beams_batch(__a , __a )
__lowercase , __lowercase , __lowercase : Any = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(__a , decoded_processor.text )
self.assertListEqual(["""<s> <s> </s>""", """<s> <s> <s>"""] , decoded_processor.text )
self.assertListEqual(__a , decoded_processor.logit_score )
self.assertListEqual(__a , decoded_processor.lm_score )
def lowerCAmelCase ( self : Any ) -> Optional[int]:
"""simple docstring"""
__lowercase : int = self.get_feature_extractor()
__lowercase : Dict = self.get_tokenizer()
__lowercase : List[str] = self.get_decoder()
__lowercase : int = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
__lowercase : Dict = self._get_dummy_logits()
__lowercase : Tuple = 15
__lowercase : Tuple = -20.0
__lowercase : Dict = -4.0
__lowercase : Dict = processor.batch_decode(
__a , beam_width=__a , beam_prune_logp=__a , token_min_logp=__a , )
__lowercase : Tuple = decoded_processor_out.text
__lowercase : List[Any] = list(__a )
with get_context("""fork""" ).Pool() as pool:
__lowercase : Any = decoder.decode_beams_batch(
__a , __a , beam_width=__a , beam_prune_logp=__a , token_min_logp=__a , )
__lowercase : Optional[Any] = [d[0][0] for d in decoded_decoder_out]
__lowercase : Optional[int] = [d[0][2] for d in decoded_decoder_out]
__lowercase : Optional[int] = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(__a , __a )
self.assertListEqual(["""</s> <s> <s>""", """<s> <s> <s>"""] , __a )
self.assertTrue(np.array_equal(__a , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , __a , atol=1E-3 ) )
self.assertTrue(np.array_equal(__a , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9474] , __a , atol=1E-3 ) )
def lowerCAmelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
__lowercase : str = self.get_feature_extractor()
__lowercase : List[Any] = self.get_tokenizer()
__lowercase : List[Any] = self.get_decoder()
__lowercase : Dict = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
__lowercase : List[Any] = self._get_dummy_logits()
__lowercase : Optional[int] = 2.0
__lowercase : Tuple = 5.0
__lowercase : Optional[Any] = -20.0
__lowercase : Tuple = True
__lowercase : Union[str, Any] = processor.batch_decode(
__a , alpha=__a , beta=__a , unk_score_offset=__a , lm_score_boundary=__a , )
__lowercase : Any = decoded_processor_out.text
__lowercase : List[Any] = list(__a )
decoder.reset_params(
alpha=__a , beta=__a , unk_score_offset=__a , lm_score_boundary=__a , )
with get_context("""fork""" ).Pool() as pool:
__lowercase : Tuple = decoder.decode_beams_batch(
__a , __a , )
__lowercase : int = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(__a , __a )
self.assertListEqual(["""<s> </s> <s> </s> </s>""", """</s> </s> <s> </s> </s>"""] , __a )
__lowercase : str = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , __a )
def lowerCAmelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__lowercase : Optional[Any] = processor.decoder.model_container[processor.decoder._model_key]
__lowercase : str = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
__lowercase : int = os.listdir(__a )
__lowercase : Optional[Any] = ["""alphabet.json""", """language_model"""]
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(__a , __a )
def lowerCAmelCase ( self : str ) -> Optional[int]:
"""simple docstring"""
__lowercase : List[str] = snapshot_download("""hf-internal-testing/processor_with_lm""" )
__lowercase : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained(__a )
__lowercase : Dict = processor.decoder.model_container[processor.decoder._model_key]
__lowercase : List[Any] = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
__lowercase : Dict = os.listdir(__a )
__lowercase : List[Any] = os.listdir(__a )
local_decoder_files.sort()
expected_decoder_files.sort()
        # test that the decoder files from the hub and the local files in the cache are the same
self.assertListEqual(__a , __a )
def lowerCAmelCase ( self : Tuple ) -> int:
"""simple docstring"""
__lowercase : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__lowercase : Dict = AutoProcessor.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__lowercase : Any = floats_list((3, 1000) )
__lowercase : List[str] = processor_wavaveca(__a , return_tensors="""np""" )
__lowercase : List[Any] = processor_auto(__a , return_tensors="""np""" )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
__lowercase : List[str] = self._get_dummy_logits()
__lowercase : List[str] = processor_wavaveca.batch_decode(__a )
__lowercase : Optional[int] = processor_auto.batch_decode(__a )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Any = self.get_feature_extractor()
__lowercase : Union[str, Any] = self.get_tokenizer()
__lowercase : Dict = self.get_decoder()
__lowercase : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
@staticmethod
def lowerCAmelCase ( __a : Union[str, Any] , __a : List[Any] ) -> Dict:
"""simple docstring"""
__lowercase : Any = [d[key] for d in offsets]
return retrieved_list
def lowerCAmelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
__lowercase : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__lowercase : Optional[Any] = self._get_dummy_logits()[0]
__lowercase : Dict = processor.decode(__a , output_word_offsets=__a )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(__a , __a ) )
self.assertEqual(""" """.join(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """end_offset""" ) , [1, 3, 5] )
def lowerCAmelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
__lowercase : List[str] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__lowercase : Any = self._get_dummy_logits()
__lowercase : Dict = processor.batch_decode(__a , output_word_offsets=__a )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(__a , __a ) )
self.assertListEqual(
[""" """.join(self.get_from_offsets(__a , """word""" ) ) for o in outputs["""word_offsets"""]] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """end_offset""" ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def lowerCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
import torch
__lowercase : Any = load_dataset("""common_voice""" , """en""" , split="""train""" , streaming=__a )
__lowercase : str = ds.cast_column("""audio""" , datasets.Audio(sampling_rate=16000 ) )
__lowercase : Tuple = iter(__a )
__lowercase : Union[str, Any] = next(__a )
__lowercase : int = AutoProcessor.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
__lowercase : int = WavaVecaForCTC.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
__lowercase : Union[str, Any] = processor(sample["""audio"""]["""array"""] , return_tensors="""pt""" ).input_values
with torch.no_grad():
__lowercase : List[Any] = model(__a ).logits.cpu().numpy()
__lowercase : Tuple = processor.decode(logits[0] , output_word_offsets=__a )
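        # seconds per logit frame: input samples per output frame divided by the sampling rate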
__lowercase : int = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
__lowercase : Optional[Any] = [
{
"""start_time""": d["""start_offset"""] * time_offset,
"""end_time""": d["""end_offset"""] * time_offset,
"""word""": d["""word"""],
}
for d in output["""word_offsets"""]
]
__lowercase : str = """WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"""
# output words
self.assertEqual(""" """.join(self.get_from_offsets(__a , """word""" ) ) , __a )
self.assertEqual(""" """.join(self.get_from_offsets(__a , """word""" ) ) , output.text )
# output times
__lowercase : Tuple = torch.tensor(self.get_from_offsets(__a , """start_time""" ) )
__lowercase : Dict = torch.tensor(self.get_from_offsets(__a , """end_time""" ) )
# fmt: off
__lowercase : List[Any] = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
__lowercase : Optional[int] = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(__a , __a , atol=0.01 ) )
self.assertTrue(torch.allclose(__a , __a , atol=0.01 ) )
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
set_seed(7_70)
lowerCamelCase : Optional[int] = {
'''c_attn''': '''att_proj''',
'''c_proj''': '''out_proj''',
'''c_fc''': '''in_proj''',
'''transformer.''': '''''',
'''h.''': '''layers.''',
'''ln_1''': '''layernorm_1''',
'''ln_2''': '''layernorm_2''',
'''ln_f''': '''layernorm_final''',
'''wpe''': '''position_embeds_layer''',
'''wte''': '''input_embeds_layer''',
}
lowerCamelCase : Optional[Any] = {
'''text_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''text.pt''',
},
'''coarse_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''coarse.pt''',
},
'''fine_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''fine.pt''',
},
'''text''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''text_2.pt''',
},
'''coarse''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''coarse_2.pt''',
},
'''fine''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''fine_2.pt''',
},
}
lowerCamelCase : Any = os.path.dirname(os.path.abspath(__file__))
lowerCamelCase : List[str] = os.path.join(os.path.expanduser('''~'''), '''.cache''')
lowerCamelCase : str = os.path.join(os.getenv('''XDG_CACHE_HOME''', default_cache_dir), '''suno''', '''bark_v0''')
def snake_case_ ( lowerCAmelCase_ : int , lowerCAmelCase_ : str=False ):
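    # Build the path of the requested checkpoint file (optionally the small variant).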
__lowercase : Optional[int] = model_type
if use_small:
key += "_small"
return os.path.join(lowerCAmelCase_ , REMOTE_MODEL_PATHS[key]["""file_name"""] )
def snake_case_ ( lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[Any] ):
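    # Make sure the target directory exists, then fetch the file from the Hub.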
os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_ )
hf_hub_download(repo_id=lowerCAmelCase_ , filename=lowerCAmelCase_ , local_dir=lowerCAmelCase_ )
def snake_case_ ( lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Tuple=False , lowerCAmelCase_ : Any="text" ):
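    # Load a suno/bark checkpoint and convert it into the matching HF Bark sub-model,
    # remapping the state-dict keys along the way.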
if model_type == "text":
__lowercase : Any = BarkSemanticModel
__lowercase : Union[str, Any] = BarkSemanticConfig
__lowercase : Any = BarkSemanticGenerationConfig
elif model_type == "coarse":
__lowercase : Dict = BarkCoarseModel
__lowercase : List[Any] = BarkCoarseConfig
__lowercase : Union[str, Any] = BarkCoarseGenerationConfig
elif model_type == "fine":
__lowercase : List[str] = BarkFineModel
__lowercase : Tuple = BarkFineConfig
__lowercase : Dict = BarkFineGenerationConfig
else:
raise NotImplementedError()
__lowercase : int = F"{model_type}_small" if use_small else model_type
__lowercase : Union[str, Any] = REMOTE_MODEL_PATHS[model_key]
if not os.path.exists(lowerCAmelCase_ ):
logger.info(F"{model_type} model not found, downloading into `{CACHE_DIR}`." )
_download(model_info["""repo_id"""] , model_info["""file_name"""] )
__lowercase : str = torch.load(lowerCAmelCase_ , map_location=lowerCAmelCase_ )
# this is a hack
__lowercase : Union[str, Any] = checkpoint["""model_args"""]
if "input_vocab_size" not in model_args:
__lowercase : List[str] = model_args["""vocab_size"""]
__lowercase : Tuple = model_args["""vocab_size"""]
del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
__lowercase : List[Any] = model_args.pop("""n_head""" )
__lowercase : str = model_args.pop("""n_embd""" )
__lowercase : int = model_args.pop("""n_layer""" )
__lowercase : List[Any] = ConfigClass(**checkpoint["""model_args"""] )
__lowercase : Optional[int] = ModelClass(config=lowerCAmelCase_ )
__lowercase : List[str] = GenerationConfigClass()
__lowercase : Tuple = model_generation_config
__lowercase : str = checkpoint["""model"""]
# fixup checkpoint
__lowercase : List[str] = """_orig_mod."""
for k, v in list(state_dict.items() ):
if k.startswith(lowerCAmelCase_ ):
# replace part of the key with corresponding layer name in HF implementation
__lowercase : Union[str, Any] = k[len(lowerCAmelCase_ ) :]
for old_layer_name in new_layer_name_dict:
__lowercase : Optional[int] = new_k.replace(lowerCAmelCase_ , new_layer_name_dict[old_layer_name] )
__lowercase : List[str] = state_dict.pop(lowerCAmelCase_ )
__lowercase : Any = set(state_dict.keys() ) - set(model.state_dict().keys() )
__lowercase : Union[str, Any] = {k for k in extra_keys if not k.endswith(""".attn.bias""" )}
__lowercase : Dict = set(model.state_dict().keys() ) - set(state_dict.keys() )
__lowercase : Tuple = {k for k in missing_keys if not k.endswith(""".attn.bias""" )}
if len(lowerCAmelCase_ ) != 0:
raise ValueError(F"extra keys found: {extra_keys}" )
if len(lowerCAmelCase_ ) != 0:
raise ValueError(F"missing keys: {missing_keys}" )
model.load_state_dict(lowerCAmelCase_ , strict=lowerCAmelCase_ )
__lowercase : int = model.num_parameters(exclude_embeddings=lowerCAmelCase_ )
__lowercase : Optional[Any] = checkpoint["""best_val_loss"""].item()
logger.info(F"model loaded: {round(n_params/1e6 , 1 )}M params, {round(lowerCAmelCase_ , 3 )} loss" )
model.eval()
model.to(lowerCAmelCase_ )
del checkpoint, state_dict
return model
def snake_case_ ( lowerCAmelCase_ : str , lowerCAmelCase_ : int=False , lowerCAmelCase_ : int="text" ):
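    # Convert one sub-model checkpoint on CPU and check its outputs against the original Bark model.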
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
__lowercase : Any = """cpu""" # do conversion on cpu
__lowercase : str = _get_ckpt_path(lowerCAmelCase_ , use_small=lowerCAmelCase_ )
__lowercase : Optional[Any] = _load_model(lowerCAmelCase_ , lowerCAmelCase_ , model_type=lowerCAmelCase_ , use_small=lowerCAmelCase_ )
# load bark initial model
__lowercase : Optional[int] = _bark_load_model(lowerCAmelCase_ , """cpu""" , model_type=lowerCAmelCase_ , use_small=lowerCAmelCase_ )
if model_type == "text":
__lowercase : Any = bark_model["""model"""]
if model.num_parameters(exclude_embeddings=lowerCAmelCase_ ) != bark_model.get_num_params():
raise ValueError("""initial and new models don't have the same number of parameters""" )
# check if same output as the bark model
__lowercase : Dict = 5
__lowercase : Any = 10
if model_type in ["text", "coarse"]:
__lowercase : int = torch.randint(256 , (batch_size, sequence_length) , dtype=torch.int )
__lowercase : int = bark_model(lowerCAmelCase_ )[0]
__lowercase : Optional[int] = model(lowerCAmelCase_ )
# take last logits
__lowercase : Dict = output_new_model_total.logits[:, [-1], :]
else:
__lowercase : Dict = 3
__lowercase : Any = 8
__lowercase : Tuple = torch.randint(256 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
__lowercase : Any = model(lowerCAmelCase_ , lowerCAmelCase_ )
__lowercase : Dict = bark_model(lowerCAmelCase_ , lowerCAmelCase_ )
__lowercase : List[Any] = output_new_model_total.logits
    # any output difference should come from differences in the self-attention implementation
if output_new_model.shape != output_old_model.shape:
raise ValueError("""initial and new outputs don't have the same shape""" )
if (output_new_model - output_old_model).abs().max().item() > 1e-3:
raise ValueError("""initial and new outputs are not equal""" )
Path(lowerCAmelCase_ ).mkdir(exist_ok=lowerCAmelCase_ )
model.save_pretrained(lowerCAmelCase_ )
def snake_case_ ( lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : str , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[int] , ):
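    # Assemble the converted semantic, coarse and fine models plus Encodec into one BarkModel and save it.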
__lowercase : str = os.path.join(lowerCAmelCase_ , lowerCAmelCase_ )
__lowercase : Optional[int] = BarkSemanticConfig.from_pretrained(os.path.join(lowerCAmelCase_ , """config.json""" ) )
__lowercase : Dict = BarkCoarseConfig.from_pretrained(os.path.join(lowerCAmelCase_ , """config.json""" ) )
__lowercase : Union[str, Any] = BarkFineConfig.from_pretrained(os.path.join(lowerCAmelCase_ , """config.json""" ) )
__lowercase : Dict = EncodecConfig.from_pretrained("""facebook/encodec_24khz""" )
__lowercase : str = BarkSemanticModel.from_pretrained(lowerCAmelCase_ )
__lowercase : Tuple = BarkCoarseModel.from_pretrained(lowerCAmelCase_ )
__lowercase : Any = BarkFineModel.from_pretrained(lowerCAmelCase_ )
__lowercase : List[str] = EncodecModel.from_pretrained("""facebook/encodec_24khz""" )
__lowercase : List[Any] = BarkConfig.from_sub_model_configs(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
__lowercase : Any = BarkGenerationConfig.from_sub_model_configs(
semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
__lowercase : List[str] = BarkModel(lowerCAmelCase_ )
__lowercase : int = semantic
__lowercase : List[Any] = coarseAcoustic
__lowercase : Union[str, Any] = fineAcoustic
__lowercase : Any = codec
__lowercase : str = bark_generation_config
Path(lowerCAmelCase_ ).mkdir(exist_ok=lowerCAmelCase_ )
bark.save_pretrained(lowerCAmelCase_ , repo_id=lowerCAmelCase_ , push_to_hub=lowerCAmelCase_ )
if __name__ == "__main__":
lowerCamelCase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''model_type''', type=str, help='''text, coarse or fine.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--is_small''', action='''store_true''', help='''convert the small version instead of the large.''')
lowerCamelCase : Dict = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
def and_gate(input_1: int, input_2: int) -> int:
    # AND gate: 1 only when neither input is 0.
    return int((input_1, input_2).count(0) == 0)
def test_and_gate() -> None:
assert and_gate(0 , 0 ) == 0
assert and_gate(0 , 1 ) == 0
assert and_gate(1 , 0 ) == 0
assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase : int = {
'''configuration_funnel''': ['''FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FunnelConfig'''],
'''convert_funnel_original_tf_checkpoint_to_pytorch''': [],
'''tokenization_funnel''': ['''FunnelTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : List[str] = ['''FunnelTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Union[str, Any] = [
'''FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FunnelBaseModel''',
'''FunnelForMaskedLM''',
'''FunnelForMultipleChoice''',
'''FunnelForPreTraining''',
'''FunnelForQuestionAnswering''',
'''FunnelForSequenceClassification''',
'''FunnelForTokenClassification''',
'''FunnelModel''',
'''FunnelPreTrainedModel''',
'''load_tf_weights_in_funnel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : str = [
'''TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFFunnelBaseModel''',
'''TFFunnelForMaskedLM''',
'''TFFunnelForMultipleChoice''',
'''TFFunnelForPreTraining''',
'''TFFunnelForQuestionAnswering''',
'''TFFunnelForSequenceClassification''',
'''TFFunnelForTokenClassification''',
'''TFFunnelModel''',
'''TFFunnelPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
lowerCamelCase : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
lowerCamelCase : int = '''Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be run first on your machine'''
def snake_case_ ( ):
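    # Prompt for the compute environment and collect the matching configuration answers.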
__lowercase : List[Any] = _ask_options(
"""In which compute environment are you running?""" , ["""This machine""", """AWS (Amazon SageMaker)"""] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
__lowercase : Union[str, Any] = get_sagemaker_input()
else:
__lowercase : str = get_cluster_input()
return config
def snake_case_ ( lowerCAmelCase_ : List[str]=None ):
if subparsers is not None:
__lowercase : Optional[int] = subparsers.add_parser("""config""" , description=lowerCAmelCase_ )
else:
__lowercase : List[str] = argparse.ArgumentParser("""Accelerate config command""" , description=lowerCAmelCase_ )
parser.add_argument(
"""--config_file""" , default=lowerCAmelCase_ , help=(
"""The path to use to store the config file. Will default to a file named default_config.yaml in the cache """
"""location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have """
"""such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed """
"""with 'huggingface'."""
) , )
if subparsers is not None:
parser.set_defaults(func=lowerCAmelCase_ )
return parser
def snake_case_ ( lowerCAmelCase_ : Tuple ):
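    # Run the questionnaire and write the answers to a JSON or YAML config file.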
__lowercase : Union[str, Any] = get_user_input()
if args.config_file is not None:
__lowercase : List[Any] = args.config_file
else:
if not os.path.isdir(lowerCAmelCase_ ):
os.makedirs(lowerCAmelCase_ )
__lowercase : Any = default_yaml_config_file
if config_file.endswith(""".json""" ):
config.to_json_file(lowerCAmelCase_ )
else:
config.to_yaml_file(lowerCAmelCase_ )
print(F"accelerate configuration saved at {config_file}" )
def snake_case_ ( ):
__lowercase : str = config_command_parser()
__lowercase : str = parser.parse_args()
config_command(lowerCAmelCase_ )
if __name__ == "__main__":
main()
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class lowerCAmelCase ( __a , __a ):
'''simple docstring'''
@register_to_config
def __init__( self : str , __a : int = 768 , ) -> str:
"""simple docstring"""
super().__init__()
__lowercase : Optional[Any] = nn.Parameter(torch.zeros(1 , __a ) )
__lowercase : List[str] = nn.Parameter(torch.ones(1 , __a ) )
def lowerCAmelCase ( self : Any , __a : Optional[Union[str, torch.device]] = None , __a : Optional[torch.dtype] = None , ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Any = nn.Parameter(self.mean.to(__a ).to(__a ) )
__lowercase : str = nn.Parameter(self.std.to(__a ).to(__a ) )
return self
def lowerCAmelCase ( self : Optional[Any] , __a : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = (embeds - self.mean) * 1.0 / self.std
return embeds
def lowerCAmelCase ( self : List[str] , __a : int ) -> Tuple:
"""simple docstring"""
__lowercase : List[Any] = (embeds * self.std) + self.mean
return embeds
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    # Return every ordering of words from word_bank whose concatenation equals target.
    word_bank = word_bank or []
    # create a table with one slot per prefix of the target (plus the empty prefix)
    table_size: int = len(target) + 1
    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])
    # seed value
    table[0] = [[]]  # because the empty string has exactly one (empty) combination
    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now, push that combination to table[i + len(word)]
                    table[i + len(word)] += new_combinations
    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()
    return table[len(target)]
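# Example: all_construct("abc", ["a", "b", "c", "ab"]) returns
# [["ab", "c"], ["a", "b", "c"]] -- every way to assemble "abc" from the bank.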
if __name__ == "__main__":
print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
print(
all_construct(
'''hexagonosaurus''',
['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
)
)
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
lowerCamelCase : int = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowerCAmelCase ( __a ):
'''simple docstring'''
def __init__( self : Optional[int] , __a : Any , __a : List[str] ) -> Optional[int]:
"""simple docstring"""
super().__init__()
self.register_modules(unet=__a , scheduler=__a )
@torch.no_grad()
def __call__( self : str , __a : int = 1 , __a : int = 100 , __a : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __a : Optional[float] = None , __a : bool = True , ) -> Union[AudioPipelineOutput, Tuple]:
"""simple docstring"""
if audio_length_in_s is None:
__lowercase : List[str] = self.unet.config.sample_size / self.unet.config.sample_rate
__lowercase : List[str] = audio_length_in_s * self.unet.config.sample_rate
__lowercase : str = 2 ** len(self.unet.up_blocks )
if sample_size < 3 * down_scale_factor:
raise ValueError(
F"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
F" {3 * down_scale_factor / self.unet.config.sample_rate}." )
__lowercase : int = int(__a )
if sample_size % down_scale_factor != 0:
__lowercase : Optional[Any] = (
(audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
) * down_scale_factor
logger.info(
F"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
F" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
""" process.""" )
__lowercase : str = int(__a )
__lowercase : List[Any] = next(iter(self.unet.parameters() ) ).dtype
__lowercase : Dict = (batch_size, self.unet.config.in_channels, sample_size)
if isinstance(__a , __a ) and len(__a ) != batch_size:
raise ValueError(
F"You have passed a list of generators of length {len(__a )}, but requested an effective batch"
F" size of {batch_size}. Make sure the batch size matches the length of the generators." )
__lowercase : Dict = randn_tensor(__a , generator=__a , device=self.device , dtype=__a )
# set step values
self.scheduler.set_timesteps(__a , device=audio.device )
__lowercase : Tuple = self.scheduler.timesteps.to(__a )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
__lowercase : Optional[Any] = self.unet(__a , __a ).sample
            # 2. compute previous sample: x_t -> x_t-1
__lowercase : int = self.scheduler.step(__a , __a , __a ).prev_sample
__lowercase : Dict = audio.clamp(-1 , 1 ).float().cpu().numpy()
__lowercase : List[str] = audio[:, :, :original_sample_size]
if not return_dict:
return (audio,)
return AudioPipelineOutput(audios=__a )
def or_gate(input_1: int, input_2: int) -> int:
    # OR gate: 1 when at least one input is 1.
    return int((input_1, input_2).count(1) != 0)
def test_or_gate() -> None:
assert or_gate(0 , 0 ) == 0
assert or_gate(0 , 1 ) == 1
assert or_gate(1 , 0 ) == 1
assert or_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
lowerCamelCase : Optional[Any] = '''ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'''
def base64_encode(data: bytes) -> bytes:
    # Make sure the supplied data is a bytes-like object
    if not isinstance(data, bytes):
        msg = F"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg)
    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)
    padding_needed = len(binary_stream) % 6 != 0
    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)
        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""
    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )
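# Example: base64_encode(b"AB") == b"QUI=" -- 16 bits of input, so two zero bits
# are appended and a single "=" marks the one padding character.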
def base64_decode(encoded_data: str) -> bytes:
    # Make sure encoded_data is either a string or a bytes-like object
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            F"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)
    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")
    padding = encoded_data.count("=")
    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."
    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"
    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )
    decoded_data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]
    return bytes(decoded_data)
if __name__ == "__main__":
import doctest
doctest.testmod()
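    # Added round-trip self-check (illustrative): the hand-rolled codec above
    # should agree with the standard library's base64 module.
    import base64 as stdlib_base64

    sample = b"Hello, World!"
    assert base64_encode(sample) == stdlib_base64.b64encode(sample)
    assert base64_decode(base64_encode(sample).decode()) == sample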
| 649
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_funnel''': ['''FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FunnelConfig'''],
'''convert_funnel_original_tf_checkpoint_to_pytorch''': [],
'''tokenization_funnel''': ['''FunnelTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_funnel_fast'''] = ['''FunnelTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_funnel'''] = [
'''FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FunnelBaseModel''',
'''FunnelForMaskedLM''',
'''FunnelForMultipleChoice''',
'''FunnelForPreTraining''',
'''FunnelForQuestionAnswering''',
'''FunnelForSequenceClassification''',
'''FunnelForTokenClassification''',
'''FunnelModel''',
'''FunnelPreTrainedModel''',
'''load_tf_weights_in_funnel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_funnel'''] = [
'''TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFFunnelBaseModel''',
'''TFFunnelForMaskedLM''',
'''TFFunnelForMultipleChoice''',
'''TFFunnelForPreTraining''',
'''TFFunnelForQuestionAnswering''',
'''TFFunnelForSequenceClassification''',
'''TFFunnelForTokenClassification''',
'''TFFunnelModel''',
'''TFFunnelPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
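# Illustrative sketch (added; an assumption, not transformers' real
# implementation): _LazyModule defers the heavy imports declared above until a
# symbol is first accessed, roughly like this stripped-down stand-in.
import importlib
import types


class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported symbol to the submodule that defines it.
        self._symbol_to_module = {
            symbol: submodule
            for submodule, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, symbol):
        if symbol not in self._symbol_to_module:
            raise AttributeError(symbol)
        module = importlib.import_module("." + self._symbol_to_module[symbol], self.__name__)
        return getattr(module, symbol)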
| 649
| 1
|
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
__lowercase : int = """ylacombe/bark-small"""
__lowercase : int = tempfile.mkdtemp()
__lowercase : List[Any] = """en_speaker_1"""
__lowercase : Union[str, Any] = """This is a test string"""
__lowercase : List[str] = """speaker_embeddings_path.json"""
__lowercase : Any = """speaker_embeddings"""
def lowerCAmelCase ( self : Union[str, Any] , **__a : Tuple ) -> List[str]:
"""simple docstring"""
return AutoTokenizer.from_pretrained(self.checkpoint , **__a )
def lowerCAmelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def lowerCAmelCase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
__lowercase : List[str] = self.get_tokenizer()
__lowercase : List[Any] = BarkProcessor(tokenizer=__a )
processor.save_pretrained(self.tmpdirname )
__lowercase : Tuple = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def lowerCAmelCase ( self : Tuple ) -> Dict:
"""simple docstring"""
__lowercase : Any = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
__lowercase : Optional[int] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
__lowercase : List[Any] = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def lowerCAmelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
__lowercase : Any = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
__lowercase : Union[str, Any] = 35
__lowercase : Union[str, Any] = 2
__lowercase : str = 8
__lowercase : int = {
"""semantic_prompt""": np.ones(__a ),
"""coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ),
"""fine_prompt""": np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
__lowercase : int = processor(text=self.input_string , voice_preset=__a )
__lowercase : List[Any] = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(__a , np.array([] ) ).tolist() )
# test loading voice preset from npz file
__lowercase : Union[str, Any] = os.path.join(self.tmpdirname , """file.npz""" )
np.savez(__a , **__a )
__lowercase : str = processor(text=self.input_string , voice_preset=__a )
__lowercase : str = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(__a , np.array([] ) ).tolist() )
# test loading voice preset from the hub
__lowercase : Optional[int] = processor(text=self.input_string , voice_preset=self.voice_preset )
def lowerCAmelCase ( self : int ) -> Any:
"""simple docstring"""
__lowercase : Union[str, Any] = self.get_tokenizer()
__lowercase : int = BarkProcessor(tokenizer=__a )
__lowercase : List[Any] = processor(text=self.input_string )
__lowercase : Optional[Any] = tokenizer(
self.input_string , padding="""max_length""" , max_length=256 , add_special_tokens=__a , return_attention_mask=__a , return_token_type_ids=__a , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
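# Illustrative helper (added, hypothetical): building and saving a Bark voice
# preset the way the tests above do, so a BarkProcessor can load it from disk.
def make_dummy_voice_preset(path, seq_len=35, nb_codebooks_coarse=2, nb_codebooks_total=8):
    preset = {
        "semantic_prompt": np.ones(seq_len),
        "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
        "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
    }
    np.savez(path, **preset)
    return preset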
| 649
|
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
lowerCamelCase : Any = None
try:
import msvcrt
except ImportError:
lowerCamelCase : str = None
try:
import fcntl
except ImportError:
lowerCamelCase : Optional[Any] = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
lowerCamelCase : Union[str, Any] = OSError
# Data
# ------------------------------------------------
lowerCamelCase : Tuple = [
'''Timeout''',
'''BaseFileLock''',
'''WindowsFileLock''',
'''UnixFileLock''',
'''SoftFileLock''',
'''FileLock''',
]
lowerCamelCase : Tuple = '''3.0.12'''
_logger = None
def logger():
    global _logger
    _logger = _logger or logging.getLogger(__name__ )
    return _logger
class lowerCAmelCase ( __a ):
'''simple docstring'''
def __init__( self : Any , __a : Any ) -> List[Any]:
"""simple docstring"""
__lowercase : List[str] = lock_file
return None
def __str__( self : str ) -> Any:
"""simple docstring"""
__lowercase : Any = F"The file lock '{self.lock_file}' could not be acquired."
return temp
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : List[Any] , __a : Optional[int] ) -> int:
"""simple docstring"""
__lowercase : Optional[Any] = lock
return None
def __enter__( self : Dict ) -> Dict:
"""simple docstring"""
return self.lock
def __exit__( self : Optional[int] , __a : Dict , __a : Any , __a : Tuple ) -> Optional[Any]:
"""simple docstring"""
self.lock.release()
return None
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : Tuple , __a : Any , __a : Dict=-1 , __a : Optional[Any]=None ) -> Any:
"""simple docstring"""
__lowercase : Optional[int] = max_filename_length if max_filename_length is not None else 255
# Hash the filename if it's too long
__lowercase : Dict = self.hash_filename_if_too_long(__a , __a )
# The path to the lock file.
__lowercase : Optional[Any] = lock_file
# The file descriptor for the *_lock_file* as it is returned by the
# os.open() function.
# This file lock is only NOT None, if the object currently holds the
# lock.
__lowercase : int = None
# The default timeout value.
__lowercase : Optional[int] = timeout
# We use this lock primarily for the lock counter.
__lowercase : Optional[Any] = threading.Lock()
# The lock counter is used for implementing the nested locking
# mechanism. Whenever the lock is acquired, the counter is increased and
# the lock is only released, when this value is 0 again.
__lowercase : Union[str, Any] = 0
return None
@property
def lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
return self._lock_file
@property
def lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
return self._timeout
@timeout.setter
def lowerCAmelCase ( self : Tuple , __a : Tuple ) -> Dict:
"""simple docstring"""
__lowercase : Tuple = float(__a )
return None
def lowerCAmelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
raise NotImplementedError()
def lowerCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
raise NotImplementedError()
@property
def lowerCAmelCase ( self : Dict ) -> List[Any]:
"""simple docstring"""
return self._lock_file_fd is not None
def lowerCAmelCase ( self : Any , __a : Optional[Any]=None , __a : Union[str, Any]=0.05 ) -> List[str]:
"""simple docstring"""
if timeout is None:
__lowercase : Union[str, Any] = self.timeout
# Increment the number right at the beginning.
# We can still undo it, if something fails.
with self._thread_lock:
self._lock_counter += 1
__lowercase : int = id(self )
__lowercase : Optional[Any] = self._lock_file
__lowercase : List[str] = time.time()
try:
while True:
with self._thread_lock:
if not self.is_locked:
logger().debug(F"Attempting to acquire lock {lock_id} on {lock_filename}" )
self._acquire()
if self.is_locked:
logger().debug(F"Lock {lock_id} acquired on {lock_filename}" )
break
elif timeout >= 0 and time.time() - start_time > timeout:
logger().debug(F"Timeout on acquiring lock {lock_id} on {lock_filename}" )
raise Timeout(self._lock_file )
else:
logger().debug(
F"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..." )
time.sleep(__a )
except: # noqa
# Something did go wrong, so decrement the counter.
with self._thread_lock:
__lowercase : Optional[int] = max(0 , self._lock_counter - 1 )
raise
return _Acquire_ReturnProxy(lock=self )
def lowerCAmelCase ( self : Union[str, Any] , __a : Optional[Any]=False ) -> Optional[Any]:
"""simple docstring"""
with self._thread_lock:
if self.is_locked:
self._lock_counter -= 1
if self._lock_counter == 0 or force:
__lowercase : Optional[Any] = id(self )
__lowercase : str = self._lock_file
logger().debug(F"Attempting to release lock {lock_id} on {lock_filename}" )
self._release()
__lowercase : List[str] = 0
logger().debug(F"Lock {lock_id} released on {lock_filename}" )
return None
def __enter__( self : Any ) -> Optional[Any]:
"""simple docstring"""
self.acquire()
return self
def __exit__( self : List[str] , __a : str , __a : int , __a : List[Any] ) -> Tuple:
"""simple docstring"""
self.release()
return None
def __del__( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
self.release(force=__a )
return None
def lowerCAmelCase ( self : Tuple , __a : str , __a : int ) -> str:
"""simple docstring"""
__lowercase : List[Any] = os.path.basename(__a )
if len(__a ) > max_length and max_length > 0:
__lowercase : int = os.path.dirname(__a )
__lowercase : List[str] = str(hash(__a ) )
__lowercase : Optional[Any] = filename[: max_length - len(__a ) - 8] + """...""" + hashed_filename + """.lock"""
return os.path.join(__a , __a )
else:
return path
class lowerCAmelCase ( __a ):
'''simple docstring'''
def __init__( self : Union[str, Any] , __a : List[Any] , __a : Optional[int]=-1 , __a : Tuple=None ) -> List[Any]:
"""simple docstring"""
from .file_utils import relative_to_absolute_path
super().__init__(__a , timeout=__a , max_filename_length=__a )
__lowercase : Tuple = """\\\\?\\""" + relative_to_absolute_path(self.lock_file )
def lowerCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : List[str] = os.O_RDWR | os.O_CREAT | os.O_TRUNC
try:
__lowercase : Tuple = os.open(self._lock_file , __a )
except OSError:
pass
else:
try:
msvcrt.locking(__a , msvcrt.LK_NBLCK , 1 )
except OSError:
os.close(__a )
else:
__lowercase : Union[str, Any] = fd
return None
def lowerCAmelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
__lowercase : Optional[Any] = self._lock_file_fd
__lowercase : int = None
msvcrt.locking(__a , msvcrt.LK_UNLCK , 1 )
os.close(__a )
try:
os.remove(self._lock_file )
# Probably another instance of the application
# that acquired the file lock.
except OSError:
pass
return None
class lowerCAmelCase ( __a ):
'''simple docstring'''
def __init__( self : List[str] , __a : Optional[Any] , __a : str=-1 , __a : List[str]=None ) -> Any:
"""simple docstring"""
__lowercase : Dict = os.statvfs(os.path.dirname(__a ) ).f_namemax
super().__init__(__a , timeout=__a , max_filename_length=__a )
def lowerCAmelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
__lowercase : List[Any] = os.O_RDWR | os.O_CREAT | os.O_TRUNC
__lowercase : List[str] = os.open(self._lock_file , __a )
try:
fcntl.flock(__a , fcntl.LOCK_EX | fcntl.LOCK_NB )
except OSError:
os.close(__a )
else:
__lowercase : str = fd
return None
def lowerCAmelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase : Any = self._lock_file_fd
__lowercase : List[str] = None
fcntl.flock(__a , fcntl.LOCK_UN )
os.close(__a )
return None
class lowerCAmelCase ( __a ):
'''simple docstring'''
def lowerCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Tuple = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
try:
__lowercase : Union[str, Any] = os.open(self._lock_file , __a )
except OSError:
pass
else:
__lowercase : Optional[int] = fd
return None
def lowerCAmelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
os.close(self._lock_file_fd )
__lowercase : int = None
try:
os.remove(self._lock_file )
# The file is already deleted and that's what we want.
except OSError:
pass
return None
lowerCamelCase : Optional[Any] = None
if msvcrt:
lowerCamelCase : List[Any] = WindowsFileLock
elif fcntl:
lowerCamelCase : List[Any] = UnixFileLock
else:
lowerCamelCase : Union[str, Any] = SoftFileLock
if warnings is not None:
warnings.warn('''only soft file lock is available''')
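# Illustrative usage (added): assuming the classes above keep their original
# filelock names (FileLock, Timeout), the lock works as a context manager.
if __name__ == "__main__":
    try:
        with FileLock("example.txt.lock", timeout=5):
            # Only one process at a time reaches this block.
            with open("example.txt", "a") as fh:
                fh.write("exclusive write\n")
    except Timeout:
        print("another process is holding example.txt.lock")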
| 649
| 1
|
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack(list):
    def __lt__(self, other):
        return self[-1] < other[-1]

    def __eq__(self, other):
        return self[-1] == other[-1]
def patience_sort(collection: list) -> list:
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stack = Stack([element])
        i = bisect_left(stacks, new_stack)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stack)
    # use a heap-based merge to merge stacks efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection
if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
print(patience_sort(unsorted))
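    # Added sanity check (illustrative): a fixed input sorts correctly.
    assert patience_sort([5, 1, 4, 2, 3]) == [1, 2, 3, 4, 5]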
| 649
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
lowerCamelCase : Optional[int] = logging.get_logger(__name__)
lowerCamelCase : Tuple = {
'''microsoft/layoutlmv3-base''': '''https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json''',
}
class lowerCAmelCase ( __a ):
'''simple docstring'''
_A : int = '''layoutlmv3'''
def __init__( self : Dict , __a : List[str]=50265 , __a : str=768 , __a : List[Any]=12 , __a : List[Any]=12 , __a : List[str]=3072 , __a : Optional[Any]="gelu" , __a : Optional[int]=0.1 , __a : List[Any]=0.1 , __a : Tuple=512 , __a : int=2 , __a : Any=0.02 , __a : Union[str, Any]=1E-5 , __a : List[str]=1 , __a : List[Any]=0 , __a : int=2 , __a : str=1024 , __a : str=128 , __a : List[Any]=128 , __a : Tuple=True , __a : Optional[int]=32 , __a : Any=128 , __a : List[Any]=64 , __a : Tuple=256 , __a : str=True , __a : int=True , __a : Optional[Any]=True , __a : Any=224 , __a : str=3 , __a : List[str]=16 , __a : Union[str, Any]=None , **__a : List[Any] , ) -> List[str]:
"""simple docstring"""
super().__init__(
vocab_size=__a , hidden_size=__a , num_hidden_layers=__a , num_attention_heads=__a , intermediate_size=__a , hidden_act=__a , hidden_dropout_prob=__a , attention_probs_dropout_prob=__a , max_position_embeddings=__a , type_vocab_size=__a , initializer_range=__a , layer_norm_eps=__a , pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a , )
__lowercase : int = max_ad_position_embeddings
__lowercase : Any = coordinate_size
__lowercase : Optional[Any] = shape_size
__lowercase : str = has_relative_attention_bias
__lowercase : int = rel_pos_bins
__lowercase : Union[str, Any] = max_rel_pos
__lowercase : str = has_spatial_attention_bias
__lowercase : str = rel_ad_pos_bins
__lowercase : List[Any] = max_rel_ad_pos
__lowercase : Tuple = text_embed
__lowercase : int = visual_embed
__lowercase : Tuple = input_size
__lowercase : Dict = num_channels
__lowercase : str = patch_size
__lowercase : Optional[int] = classifier_dropout
class lowerCAmelCase ( __a ):
'''simple docstring'''
_A : str = version.parse('''1.12''' )
@property
def lowerCAmelCase ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
else:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels"""}),
] )
@property
def lowerCAmelCase ( self : Union[str, Any] ) -> float:
"""simple docstring"""
return 1E-5
@property
def lowerCAmelCase ( self : str ) -> int:
"""simple docstring"""
return 12
def lowerCAmelCase ( self : List[Any] , __a : "ProcessorMixin" , __a : int = -1 , __a : int = -1 , __a : bool = False , __a : Optional["TensorType"] = None , __a : int = 3 , __a : int = 40 , __a : int = 40 , ) -> Mapping[str, Any]:
"""simple docstring"""
setattr(processor.image_processor , """apply_ocr""" , __a )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__lowercase : Tuple = compute_effective_axis_dimension(
__a , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__lowercase : Tuple = processor.tokenizer.num_special_tokens_to_add(__a )
__lowercase : Tuple = compute_effective_axis_dimension(
__a , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__a )
# Generate dummy inputs according to compute batch and sequence
__lowercase : Union[str, Any] = [[""" """.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
__lowercase : Tuple = [[[48, 84, 73, 128]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
__lowercase : Tuple = self._generate_dummy_images(__a , __a , __a , __a )
__lowercase : int = dict(
processor(
__a , text=__a , boxes=__a , return_tensors=__a , ) )
return inputs
| 649
| 1
|
def hamming_distance(string_a: str, string_b: str) -> int:
    if len(string_a) != len(string_b):
        raise ValueError("""String lengths must match!""" )
    count = 0
    for char_a, char_b in zip(string_a, string_b):
        if char_a != char_b:
            count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
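    # Added example: the classic pair "karolin"/"kathrin" differs in 3 positions.
    assert hamming_distance("karolin", "kathrin") == 3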
| 649
|
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
lowerCamelCase : List[Any] = logging.get_logger(__name__)
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : List[str] , __a : str = None , __a : uuid.UUID = None , __a : Any=None , __a : List[Any]=None ) -> List[Any]:
"""simple docstring"""
if not conversation_id:
            __lowercase : Any = uuid.uuid4()
if past_user_inputs is None:
__lowercase : Dict = []
if generated_responses is None:
__lowercase : Dict = []
__lowercase : uuid.UUID = conversation_id
__lowercase : List[str] = past_user_inputs
__lowercase : List[str] = generated_responses
__lowercase : Optional[str] = text
def __eq__( self : Dict , __a : Dict ) -> Any:
"""simple docstring"""
if not isinstance(__a , __a ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def lowerCAmelCase ( self : List[str] , __a : str , __a : bool = False ) -> Dict:
"""simple docstring"""
if self.new_user_input:
if overwrite:
logger.warning(
F"User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten "
F"with: \"{text}\"." )
__lowercase : Optional[int] = text
else:
logger.warning(
F"User input added while unprocessed input was existing: \"{self.new_user_input}\" new input "
F"ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input" )
else:
__lowercase : Dict = text
def lowerCAmelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
__lowercase : Dict = None
def lowerCAmelCase ( self : Optional[int] , __a : str ) -> List[Any]:
"""simple docstring"""
self.generated_responses.append(__a )
def lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self : int ) -> str:
"""simple docstring"""
__lowercase : Optional[int] = F"Conversation id: {self.uuid} \n"
for is_user, text in self.iter_texts():
__lowercase : Optional[Any] = """user""" if is_user else """bot"""
output += F"{name} >> {text} \n"
return output
@add_end_docstrings(
__a , r'''
min_length_for_response (`int`, *optional*, defaults to 32):
The minimum length (in number of tokens) for a response.
minimum_tokens (`int`, *optional*, defaults to 10):
The minimum length of tokens to leave for a response.
''' , )
class lowerCAmelCase ( __a ):
'''simple docstring'''
def __init__( self : Any , *__a : int , **__a : str ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(*__a , **__a )
if self.tokenizer.pad_token_id is None:
__lowercase : List[Any] = self.tokenizer.eos_token
def lowerCAmelCase ( self : Union[str, Any] , __a : int=None , __a : Tuple=None , __a : Any=None , **__a : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowercase : List[str] = {}
__lowercase : Tuple = {}
__lowercase : List[str] = {}
if min_length_for_response is not None:
__lowercase : Dict = min_length_for_response
if minimum_tokens is not None:
__lowercase : Union[str, Any] = minimum_tokens
if "max_length" in generate_kwargs:
__lowercase : Union[str, Any] = generate_kwargs["""max_length"""]
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
__lowercase : Union[str, Any] = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(__a )
return preprocess_params, forward_params, postprocess_params
def __call__( self : Optional[int] , __a : Union[Conversation, List[Conversation]] , __a : Dict=0 , **__a : List[Any] ) -> Tuple:
"""simple docstring"""
__lowercase : List[Any] = super().__call__(__a , num_workers=__a , **__a )
if isinstance(__a , __a ) and len(__a ) == 1:
return outputs[0]
return outputs
def lowerCAmelCase ( self : Union[str, Any] , __a : Conversation , __a : Tuple=32 ) -> Dict[str, Any]:
"""simple docstring"""
if not isinstance(__a , __a ):
raise ValueError("""ConversationalPipeline, expects Conversation as inputs""" )
if conversation.new_user_input is None:
raise ValueError(
F"Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. "
"""Add user inputs with the conversation's `add_user_input` method""" )
if hasattr(self.tokenizer , """_build_conversation_input_ids""" ):
__lowercase : List[Any] = self.tokenizer._build_conversation_input_ids(__a )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
__lowercase : Tuple = self._legacy_parse_and_tokenize(__a )
if self.framework == "pt":
__lowercase : List[Any] = torch.LongTensor([input_ids] )
elif self.framework == "tf":
__lowercase : List[str] = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def lowerCAmelCase ( self : Any , __a : Dict , __a : Any=10 , **__a : Dict ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Optional[int] = generate_kwargs.get("""max_length""" , self.model.config.max_length )
__lowercase : List[Any] = model_inputs["""input_ids"""].shape[1]
if max_length - minimum_tokens < n:
logger.warning(F"Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})" )
__lowercase : Any = max_length - minimum_tokens
__lowercase : int = model_inputs["""input_ids"""][:, -trim:]
if "attention_mask" in model_inputs:
__lowercase : Dict = model_inputs["""attention_mask"""][:, -trim:]
__lowercase : Union[str, Any] = model_inputs.pop("""conversation""" )
__lowercase : Tuple = max_length
__lowercase : int = self.model.generate(**__a , **__a )
if self.model.config.is_encoder_decoder:
__lowercase : Optional[int] = 1
else:
__lowercase : str = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def lowerCAmelCase ( self : int , __a : Tuple , __a : List[Any]=True ) -> List[str]:
"""simple docstring"""
__lowercase : int = model_outputs["""output_ids"""]
__lowercase : Union[str, Any] = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=__a , clean_up_tokenization_spaces=__a , )
__lowercase : List[str] = model_outputs["""conversation"""]
conversation.mark_processed()
conversation.append_response(__a )
return conversation
def lowerCAmelCase ( self : int , __a : Conversation ) -> Dict:
"""simple docstring"""
__lowercase : Optional[int] = self.tokenizer.eos_token_id
__lowercase : Optional[Any] = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(__a , add_special_tokens=__a ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(__a , add_special_tokens=__a ) )
if len(__a ) > self.tokenizer.model_max_length:
__lowercase : List[Any] = input_ids[-self.tokenizer.model_max_length :]
return input_ids
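# Illustrative usage (added): this mirrors the transformers "conversational"
# task API; the DialoGPT checkpoint is an example, and `Conversation` is the
# container class defined above (transformers also exports it directly).
if __name__ == "__main__":
    from transformers import Conversation, pipeline

    chatbot = pipeline("conversational", model="microsoft/DialoGPT-small")
    conversation = Conversation("What's a good way to learn Python?")
    conversation = chatbot(conversation)
    print(conversation.generated_responses[-1])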
| 649
| 1
|
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
lowerCamelCase : Union[str, Any] = 1E-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : Dict , __a : List[str] , __a : Optional[int]=16 , __a : Optional[Any]=13 , __a : str=7 , __a : List[str]=14 , __a : Any=10 , __a : str=19 , __a : int=5 , __a : Any=4 , __a : List[Any]=True , __a : Tuple=16 , __a : Dict=2 , __a : Tuple=4 , __a : int=4 , __a : List[Any]="gelu" , __a : Tuple=0.1 , __a : List[str]=0.1 , __a : int=[1, 2, 3, 4, 5] , __a : str=25 , __a : Any=5 , ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Optional[Any] = d_model
__lowercase : Dict = parent
__lowercase : Tuple = batch_size
__lowercase : Optional[int] = prediction_length
__lowercase : List[str] = context_length
__lowercase : Any = cardinality
__lowercase : str = num_time_features
__lowercase : Optional[int] = lags_sequence
__lowercase : Optional[Any] = embedding_dimension
__lowercase : List[Any] = is_training
__lowercase : List[str] = hidden_size
__lowercase : int = num_hidden_layers
__lowercase : Any = num_attention_heads
__lowercase : List[Any] = intermediate_size
__lowercase : int = hidden_act
__lowercase : str = hidden_dropout_prob
__lowercase : List[Any] = attention_probs_dropout_prob
__lowercase : str = context_length
__lowercase : int = prediction_length + label_length
__lowercase : Union[str, Any] = label_length
__lowercase : Optional[int] = moving_average
__lowercase : Optional[Any] = autocorrelation_factor
def lowerCAmelCase ( self : str ) -> List[str]:
"""simple docstring"""
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def lowerCAmelCase ( self : Tuple , __a : str ) -> int:
"""simple docstring"""
__lowercase : Any = config.context_length + max(config.lags_sequence )
__lowercase : Any = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
__lowercase : Optional[int] = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
__lowercase : List[str] = floats_tensor([self.batch_size, _past_length] )
__lowercase : List[str] = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
__lowercase : Dict = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
__lowercase : str = floats_tensor([self.batch_size, config.prediction_length] )
__lowercase : List[str] = {
"""past_values""": past_values,
"""static_categorical_features""": static_categorical_features,
"""past_time_features""": past_time_features,
"""past_observed_mask""": past_observed_mask,
"""future_time_features""": future_time_features,
"""future_values""": future_values,
}
return inputs_dict
def lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Optional[Any] = self.get_config()
__lowercase : Any = self.prepare_autoformer_inputs_dict(__a )
return config, inputs_dict
def lowerCAmelCase ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase , __lowercase : List[str] = self.prepare_config_and_inputs()
return config, inputs_dict
def lowerCAmelCase ( self : Optional[Any] , __a : Tuple , __a : Optional[int] ) -> Any:
"""simple docstring"""
__lowercase : List[str] = AutoformerModel(config=__a ).to(__a ).eval()
__lowercase : Optional[int] = model(**__a )
__lowercase : Dict = outputs.encoder_last_hidden_state
__lowercase : Tuple = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
__lowercase : List[str] = model.get_encoder()
encoder.save_pretrained(__a )
__lowercase : List[str] = AutoformerEncoder.from_pretrained(__a ).to(__a )
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase : Any = model.create_network_inputs(**__a )
__lowercase , __lowercase : Any = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
__lowercase : Optional[Any] = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
__lowercase : Union[str, Any] = encoder(inputs_embeds=__a )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
__lowercase : str = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
__lowercase : Optional[int] = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
__lowercase : Any = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
__lowercase : Dict = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
__lowercase : Optional[Any] = model.get_decoder()
decoder.save_pretrained(__a )
__lowercase : Tuple = AutoformerDecoder.from_pretrained(__a ).to(__a )
__lowercase : str = decoder(
trend=__a , inputs_embeds=__a , encoder_hidden_states=__a , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class lowerCAmelCase ( __a , __a , unittest.TestCase ):
'''simple docstring'''
_A : List[str] = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
_A : List[Any] = (AutoformerForPrediction,) if is_torch_available() else ()
_A : Any = {'''feature-extraction''': AutoformerModel} if is_torch_available() else {}
_A : Dict = False
_A : Tuple = False
_A : Optional[int] = False
_A : Tuple = False
_A : str = False
_A : Union[str, Any] = False
def lowerCAmelCase ( self : Dict ) -> str:
"""simple docstring"""
__lowercase : List[str] = AutoformerModelTester(self )
__lowercase : Dict = ConfigTester(self , config_class=__a , has_text_modality=__a )
def lowerCAmelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase , __lowercase : Dict = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
__lowercase : Dict = model_class(__a )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__a )
__lowercase , __lowercase : Tuple = model_class.from_pretrained(__a , output_loading_info=__a )
self.assertEqual(info["""missing_keys"""] , [] )
def lowerCAmelCase ( self : List[str] ) -> List[str]:
"""simple docstring"""
__lowercase : str = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*__a )
@unittest.skip(reason="""Model has no tokens embeddings""" )
def lowerCAmelCase ( self : str ) -> int:
"""simple docstring"""
pass
def lowerCAmelCase ( self : Any ) -> List[str]:
"""simple docstring"""
__lowercase : Any = inspect.signature(getattr(__a , """forward""" ) )
# The main input is the name of the argument after `self`
__lowercase : Optional[int] = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , __a )
def lowerCAmelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase , __lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase : Dict = model_class(__a )
__lowercase : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase : Any = [*signature.parameters.keys()]
__lowercase : int = [
"""past_values""",
"""past_time_features""",
"""past_observed_mask""",
"""static_categorical_features""",
"""static_real_features""",
"""future_values""",
"""future_time_features""",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("""future_observed_mask""" )
expected_arg_names.extend(
[
"""decoder_attention_mask""",
"""head_mask""",
"""decoder_head_mask""",
"""cross_attn_head_mask""",
"""encoder_outputs""",
"""past_key_values""",
"""output_hidden_states""",
"""output_attentions""",
"""use_cache""",
"""return_dict""",
] )
self.assertListEqual(arg_names[: len(__a )] , __a )
def lowerCAmelCase ( self : int ) -> int:
"""simple docstring"""
__lowercase , __lowercase : str = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase : int = True
__lowercase : Tuple = getattr(self.model_tester , """seq_length""" , __a )
__lowercase : Union[str, Any] = getattr(self.model_tester , """decoder_seq_length""" , __a )
__lowercase : List[str] = getattr(self.model_tester , """encoder_seq_length""" , __a )
__lowercase : List[Any] = getattr(self.model_tester , """d_model""" , __a )
__lowercase : Optional[int] = getattr(self.model_tester , """num_attention_heads""" , __a )
__lowercase : Any = d_model // num_attention_heads
for model_class in self.all_model_classes:
__lowercase : Dict = True
__lowercase : List[str] = False
__lowercase : Optional[int] = True
__lowercase : str = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
__lowercase : int = model(**self._prepare_for_class(__a , __a ) )
__lowercase : Any = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__lowercase : Optional[int] = True
__lowercase : List[str] = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
__lowercase : Union[str, Any] = model(**self._prepare_for_class(__a , __a ) )
__lowercase : Dict = outputs.encoder_attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
__lowercase : Tuple = len(__a )
__lowercase : str = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(__a , __a )
# decoder attentions
__lowercase : List[Any] = outputs.decoder_attentions
self.assertIsInstance(__a , (list, tuple) )
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
__lowercase : Optional[int] = outputs.cross_attentions
self.assertIsInstance(__a , (list, tuple) )
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
__lowercase : Tuple = True
__lowercase : Union[str, Any] = True
__lowercase : Tuple = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
__lowercase : Any = model(**self._prepare_for_class(__a , __a ) )
self.assertEqual(out_len + 2 , len(__a ) )
__lowercase : Optional[Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def lowerCAmelCase ( self : int ) -> Any:
"""simple docstring"""
super().test_retain_grad_hidden_states_attentions()
def snake_case_ ( lowerCAmelCase_ : Optional[int]="train-batch.pt" ):
__lowercase : Dict = hf_hub_download(repo_id="""hf-internal-testing/tourism-monthly-batch""" , filename=lowerCAmelCase_ , repo_type="""dataset""" )
__lowercase : Optional[int] = torch.load(lowerCAmelCase_ , map_location=lowerCAmelCase_ )
return batch
@require_torch
@slow
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
__lowercase : List[str] = AutoformerModel.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(__a )
__lowercase : List[Any] = prepare_batch()
with torch.no_grad():
__lowercase : Tuple = model(
past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , future_values=batch["""future_values"""] , future_time_features=batch["""future_time_features"""] , )[0]
__lowercase : List[str] = torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , __a )
__lowercase : Optional[int] = torch.tensor(
[[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] , device=__a )
self.assertTrue(torch.allclose(output[0, :3, :3] , __a , atol=__a ) )
def lowerCAmelCase ( self : str ) -> str:
"""simple docstring"""
__lowercase : int = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(__a )
__lowercase : List[str] = prepare_batch("""val-batch.pt""" )
with torch.no_grad():
__lowercase : Optional[Any] = model(
past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , ).encoder_last_hidden_state
__lowercase : List[str] = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , __a )
__lowercase : Optional[int] = torch.tensor(
[[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] , device=__a )
self.assertTrue(torch.allclose(output[0, :3, :3] , __a , atol=__a ) )
def lowerCAmelCase ( self : Dict ) -> List[Any]:
"""simple docstring"""
__lowercase : Optional[Any] = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(__a )
__lowercase : Optional[int] = prepare_batch("""val-batch.pt""" )
with torch.no_grad():
__lowercase : int = model.generate(
static_categorical_features=batch["""static_categorical_features"""] , past_time_features=batch["""past_time_features"""] , past_values=batch["""past_values"""] , future_time_features=batch["""future_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , )
__lowercase : int = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , __a )
__lowercase : Optional[Any] = torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=__a )
__lowercase : Dict = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , __a , rtol=1E-1 ) )
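# Illustrative usage (added): mirrors the slow integration test above, assuming
# the batch-loading helper keeps its original name `prepare_batch`.
if __name__ == "__main__":
    model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly")
    batch = prepare_batch("val-batch.pt")
    with torch.no_grad():
        outputs = model.generate(
            static_categorical_features=batch["static_categorical_features"],
            past_time_features=batch["past_time_features"],
            past_values=batch["past_values"],
            future_time_features=batch["future_time_features"],
            past_observed_mask=batch["past_observed_mask"],
        )
    print(outputs.sequences.shape)  # (64, num_parallel_samples, prediction_length)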
| 649
|
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class lowerCAmelCase ( __a ):
'''simple docstring'''
def lowerCAmelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowercase : str = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__a , """tf_padding""" ) )
self.parent.assertTrue(hasattr(__a , """depth_multiplier""" ) )
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : Union[str, Any] , __a : Tuple , __a : str=13 , __a : Dict=3 , __a : List[Any]=32 , __a : Any=0.25 , __a : Any=8 , __a : Optional[int]=8 , __a : Optional[int]=6 , __a : Dict=32 , __a : Tuple=True , __a : List[Any]=True , __a : Optional[int]=True , __a : Tuple="relu6" , __a : Optional[Any]=1280 , __a : str=0.1 , __a : str=0.02 , __a : Optional[Any]=True , __a : Tuple=True , __a : Dict=10 , __a : Optional[Any]=None , ) -> Any:
"""simple docstring"""
__lowercase : List[str] = parent
__lowercase : Tuple = batch_size
__lowercase : Dict = num_channels
__lowercase : Optional[int] = image_size
__lowercase : int = depth_multiplier
__lowercase : str = depth_divisible_by
__lowercase : int = min_depth
__lowercase : Tuple = expand_ratio
__lowercase : Optional[int] = tf_padding
__lowercase : Dict = output_stride
__lowercase : Dict = first_layer_is_expansion
__lowercase : Optional[Any] = finegrained_output
__lowercase : str = hidden_act
__lowercase : Union[str, Any] = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier )
__lowercase : Optional[int] = classifier_dropout_prob
__lowercase : int = use_labels
__lowercase : Optional[int] = is_training
__lowercase : Dict = num_labels
__lowercase : Tuple = initializer_range
__lowercase : Optional[Any] = scope
def lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase : List[Any] = None
__lowercase : Optional[Any] = None
if self.use_labels:
__lowercase : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
__lowercase : Optional[int] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__lowercase : List[Any] = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowerCAmelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def lowerCAmelCase ( self : Tuple , __a : Dict , __a : Tuple , __a : Optional[int] , __a : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : Optional[int] = MobileNetVaModel(config=__a )
model.to(__a )
model.eval()
__lowercase : Tuple = model(__a )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
self.parent.assertEqual(
result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , )
def lowerCAmelCase ( self : List[str] , __a : Optional[int] , __a : List[str] , __a : str , __a : Optional[int] ) -> Tuple:
"""simple docstring"""
__lowercase : List[Any] = self.num_labels
__lowercase : Dict = MobileNetVaForImageClassification(__a )
model.to(__a )
model.eval()
__lowercase : Dict = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase ( self : int , __a : List[str] , __a : Tuple , __a : Any , __a : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowercase : int = self.num_labels
__lowercase : List[Any] = MobileNetVaForSemanticSegmentation(__a )
model.to(__a )
model.eval()
__lowercase : Dict = model(__a )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
__lowercase : str = model(__a , labels=__a )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowerCAmelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
__lowercase : List[str] = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase , __lowercase : List[str] = config_and_inputs
__lowercase : List[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase ( __a , __a , unittest.TestCase ):
'''simple docstring'''
_A : Tuple = (
(MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
if is_torch_available()
else ()
)
_A : Optional[Any] = (
{
'''feature-extraction''': MobileNetVaModel,
'''image-classification''': MobileNetVaForImageClassification,
'''image-segmentation''': MobileNetVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
_A : Tuple = False
_A : List[str] = False
_A : List[str] = False
_A : Optional[int] = False
def lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = MobileNetVaModelTester(self )
__lowercase : int = MobileNetVaConfigTester(self , config_class=__a , has_text_modality=__a )
def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileNetV2 does not use inputs_embeds""" )
def lowerCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip(reason="""MobileNetV2 does not support input and output embeddings""" )
def lowerCAmelCase ( self : Any ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip(reason="""MobileNetV2 does not output attentions""" )
def lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
pass
def lowerCAmelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
__lowercase , __lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase : List[Any] = model_class(__a )
__lowercase : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase : int = [*signature.parameters.keys()]
__lowercase : Any = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __a )
def lowerCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
__lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def lowerCAmelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
def check_hidden_states_output(__a : List[Any] , __a : Tuple , __a : List[str] ):
__lowercase : Optional[Any] = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
__lowercase : List[Any] = model(**self._prepare_for_class(__a , __a ) )
__lowercase : Tuple = outputs.hidden_states
__lowercase : str = 16
self.assertEqual(len(__a ) , __a )
__lowercase , __lowercase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase : Any = True
check_hidden_states_output(__a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase : Union[str, Any] = True
check_hidden_states_output(__a , __a , __a )
def lowerCAmelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__lowercase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
def lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
__lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__a )
@slow
def lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase : Optional[int] = MobileNetVaModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def snake_case_ ( ):
__lowercase : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
return (
MobileNetVaImageProcessor.from_pretrained("""google/mobilenet_v2_1.0_224""" ) if is_vision_available() else None
)
@slow
def lowerCAmelCase ( self : str ) -> int:
"""simple docstring"""
        model = MobileNetVaForImageClassification.from_pretrained("""google/mobilenet_v2_1.0_224""" ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1001) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_logits = torch.tensor([0.2445, -1.1993, 0.1905] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_logits , atol=1E-4 ) )
@slow
def lowerCAmelCase ( self : Tuple ) -> Any:
"""simple docstring"""
        model = MobileNetVaForSemanticSegmentation.from_pretrained("""google/deeplabv3_mobilenet_v2_1.0_513""" )
        model = model.to(torch_device )
        image_processor = MobileNetVaImageProcessor.from_pretrained("""google/deeplabv3_mobilenet_v2_1.0_513""" )
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 21, 65, 65) )
        self.assertEqual(logits.shape , expected_shape )
        expected_slice = torch.tensor(
            [
                [[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
                [[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
                [[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
            ] , device=torch_device , )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , expected_slice , atol=1E-4 ) )
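# For reference, a minimal sketch of decoding the outputs verified above
# (assumptions: `model` and `outputs` as in the integration tests; `id2label`
# ships with the pretrained config):
#
#     predicted_idx = outputs.logits.argmax(-1).item()
#     print(model.config.id2label[predicted_idx])    # classification head
#
#     # for segmentation, the class map is the channel-wise argmax of the logits
#     seg_map = outputs.logits.argmax(dim=1)[0]      # shape (65, 65)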
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskaFormerModelTester :
'''simple docstring'''
    def __init__( self , parent , batch_size=2 , is_training=True , use_auxiliary_loss=False , num_queries=10 , num_channels=3 , min_size=32 * 8 , max_size=32 * 8 , num_labels=4 , hidden_dim=64 , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim
def lowerCAmelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
            torch_device )
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size] , device=torch_device )
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=torch_device ) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels) , device=torch_device ) > 0.5).long()
        config = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
        config = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
__lowercase : List[Any] = self.num_queries
__lowercase : Union[str, Any] = self.num_labels
__lowercase : Dict = [1, 1, 1, 1]
__lowercase : Optional[int] = self.num_channels
__lowercase : List[str] = 64
__lowercase : List[str] = 128
__lowercase : List[str] = self.hidden_dim
__lowercase : Optional[Any] = self.hidden_dim
__lowercase : List[str] = self.hidden_dim
return config
def lowerCAmelCase ( self : List[Any] ) -> str:
"""simple docstring"""
        config , pixel_values , pixel_mask , mask_labels , class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask}
return config, inputs_dict
    def lowerCAmelCase ( self , output , config ):
        """simple docstring"""
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(encoder_hidden_states ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(pixel_decoder_hidden_states ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(transformer_decoder_hidden_states ) , config.decoder_layers )
    def lowerCAmelCase ( self , config , pixel_values , pixel_mask , output_hidden_states=False ):
        """simple docstring"""
        with torch.no_grad():
            model = MaskaFormerModel(config=config )
            model.to(torch_device )
            model.eval()
            output = model(pixel_values=pixel_values , pixel_mask=pixel_mask )
            output = model(pixel_values , output_hidden_states=True )
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
        # let's ensure the other two hidden states exist
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
        self.parent.assertTrue(output.encoder_last_hidden_state is not None )
        if output_hidden_states:
            self.check_output_hidden_state(output , config )
    def lowerCAmelCase ( self , config , pixel_values , pixel_mask , mask_labels , class_labels ):
        """simple docstring"""
        model = MaskaFormerForUniversalSegmentation(config=config )
        model.to(torch_device )
        model.eval()
        def comm_check_on_output(result ):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.encoder_last_hidden_state is not None )
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
        with torch.no_grad():
            result = model(pixel_values=pixel_values , pixel_mask=pixel_mask )
            result = model(pixel_values )
            comm_check_on_output(result )
            result = model(
                pixel_values=pixel_values , pixel_mask=pixel_mask , mask_labels=mask_labels , class_labels=class_labels )
            comm_check_on_output(result )
        self.parent.assertTrue(result.loss is not None )
        self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class lowerCAmelCase ( __a , __a , unittest.TestCase ):
'''simple docstring'''
_A : Tuple = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
_A : Union[str, Any] = {'''feature-extraction''': MaskaFormerModel} if is_torch_available() else {}
_A : Optional[int] = False
_A : Dict = False
_A : List[str] = False
_A : Dict = False
def lowerCAmelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
        self.model_tester = MaskaFormerModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MaskaFormerConfig , has_text_modality=False )
def lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
__lowercase , __lowercase : int = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(__a , **__a , output_hidden_states=__a )
def lowerCAmelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
__lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*__a )
@unittest.skip(reason="""Mask2Former does not use inputs_embeds""" )
def lowerCAmelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip(reason="""Mask2Former does not have a get_input_embeddings method""" )
def lowerCAmelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip(reason="""Mask2Former is not a generative model""" )
def lowerCAmelCase ( self : Tuple ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip(reason="""Mask2Former does not use token embeddings""" )
def lowerCAmelCase ( self : int ) -> Dict:
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(
reason="""Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def lowerCAmelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCAmelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
pass
def lowerCAmelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
__lowercase , __lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase : List[Any] = model_class(__a )
__lowercase : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase : str = [*signature.parameters.keys()]
__lowercase : Optional[int] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __a )
@slow
def lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
__lowercase : Any = MaskaFormerModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
        size = (self.model_tester.min_size,) * 2
        inputs = {
            """pixel_values""": torch.randn((2, 3, *size) , device=torch_device ),
            """mask_labels""": torch.randn((2, 10, *size) , device=torch_device ),
            """class_labels""": torch.zeros(2 , 10 , device=torch_device ).long(),
        }
        config = self.model_tester.get_config()
        model = MaskaFormerForUniversalSegmentation(config ).to(torch_device )
        outputs = model(**inputs )
self.assertTrue(outputs.loss is not None )
def lowerCAmelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
__lowercase , __lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(__a , **__a , output_hidden_states=__a )
def lowerCAmelCase ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config ).to(torch_device )
            outputs = model(**inputs_dict , output_attentions=True )
            self.assertTrue(outputs.attentions is not None )
def lowerCAmelCase ( self : str ) -> Tuple:
"""simple docstring"""
if not self.model_tester.is_training:
return
        model_class = self.all_model_classes[1]
        config , pixel_values , pixel_mask , mask_labels , class_labels = self.model_tester.prepare_config_and_inputs()
        model = model_class(config )
        model.to(torch_device )
        model.train()
        loss = model(pixel_values , mask_labels=mask_labels , class_labels=class_labels ).loss
loss.backward()
def lowerCAmelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
        model_class = self.all_model_classes[1]
        config , pixel_values , pixel_mask , mask_labels , class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True
        model = model_class(config ).to(torch_device )
        model.train()
        outputs = model(pixel_values , mask_labels=mask_labels , class_labels=class_labels )
        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()
        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()
        attentions = outputs.attentions[0]
        attentions.retain_grad()
        outputs.loss.backward(retain_graph=True )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
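        # Note: retain_grad() is required above because these activations are
        # non-leaf tensors; without it autograd frees their .grad during backward().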
lowerCamelCase : Optional[int] = 1E-4
def prepare_img( ):
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_vision
@slow
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def lowerCAmelCase ( self : Optional[Any] ) -> int:
"""simple docstring"""
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def lowerCAmelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
        model = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image , return_tensors="""pt""" ).to(torch_device )
        inputs_shape = inputs["""pixel_values"""].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(inputs_shape , (1, 3, 384, 384) )
        with torch.no_grad():
            outputs = model(**inputs )
__lowercase : str = torch.tensor(
[[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]] ).to(__a )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , __a , atol=__a ) )
__lowercase : List[Any] = torch.tensor(
[[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]] ).to(__a )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __a , atol=__a ) )
__lowercase : Optional[Any] = torch.tensor(
[[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]] ).to(__a )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __a , atol=__a ) )
def lowerCAmelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
        model = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(torch_device ).eval()
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image , return_tensors="""pt""" ).to(torch_device )
        inputs_shape = inputs["""pixel_values"""].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(inputs_shape , (1, 3, 384, 384) )
        with torch.no_grad():
            outputs = model(**inputs )
# masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
__lowercase : Dict = [
[-8.7839, -9.0056, -8.8121],
[-7.4104, -7.0313, -6.5401],
[-6.6105, -6.3427, -6.4675],
]
__lowercase : int = torch.tensor(__a ).to(__a )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __a , atol=__a ) )
# class_queries_logits
        class_queries_logits = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
__lowercase : List[str] = torch.tensor(
[
[1.8324, -8.0835, -4.1922],
[0.8450, -9.0050, -3.6053],
[0.3045, -7.7293, -3.0275],
] ).to(__a )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __a , atol=__a ) )
def lowerCAmelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
        model = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(torch_device ).eval()
        image_processor = self.default_image_processor
        inputs = image_processor(
            [np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors="""pt""" , )
        inputs["""pixel_values"""] = inputs["""pixel_values"""].to(torch_device )
        inputs["""mask_labels"""] = [el.to(torch_device ) for el in inputs["""mask_labels"""]]
        inputs["""class_labels"""] = [el.to(torch_device ) for el in inputs["""class_labels"""]]
        with torch.no_grad():
            outputs = model(**inputs )
self.assertTrue(outputs.loss is not None )
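# A minimal post-processing sketch (assumption: the image processor exposes
# post_process_semantic_segmentation, as the real Mask2Former processor does):
#
#     segmentation = image_processor.post_process_semantic_segmentation(
#         outputs, target_sizes=[(384, 384)]
#     )[0]  # (height, width) tensor of per-pixel class ids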
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def snake_case_ ( main_process_only : bool = True , *args , **kwargs ):
    if not is_tqdm_available():
        raise ImportError("""Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.""" )
    disable = False
    if main_process_only:
        # render the bar only on the local main process; disable it everywhere else
        disable = PartialState().local_process_index != 0
    return _tqdm(*args , **kwargs , disable=disable )
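# Usage sketch (note: with this signature `main_process_only` is the first
# positional argument, so tqdm's own arguments must come after it; the export
# path below is an assumption):
#
#     from accelerate.utils import tqdm
#     for batch in tqdm(True, dataloader, desc="train"):
#         ...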
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class FlaubertModelTester ( object ):
'''simple docstring'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_lengths=True , use_token_type_ids=True , use_labels=True , gelu_activation=True , sinusoidal_embeddings=False , causal=False , asm=False , n_langs=2 , vocab_size=99 , n_special=0 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=12 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , summary_type="last" , use_proj=None , scope=None , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
def lowerCAmelCase ( self : Tuple ) -> List[str]:
"""simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
            )  # small variation of seq_length
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            is_impossible_labels = ids_tensor([self.batch_size] , 2 ).float()
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def lowerCAmelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def lowerCAmelCase ( self : Optional[Any] , __a : Any , __a : Optional[int] , __a : List[Any] , __a : List[str] , __a : Optional[Any] , __a : int , __a : Tuple , __a : Optional[int] , __a : Optional[Any] , ) -> Tuple:
"""simple docstring"""
        model = FlaubertModel(config=__a )
        model.to(__a )
        model.eval()
        result = model(__a , lengths=__a , langs=__a )
        result = model(__a , langs=__a )
        result = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase ( self : Union[str, Any] , __a : str , __a : List[Any] , __a : Tuple , __a : Any , __a : Tuple , __a : str , __a : Tuple , __a : int , __a : Tuple , ) -> List[Any]:
"""simple docstring"""
        model = FlaubertWithLMHeadModel(__a )
        model.to(__a )
        model.eval()
        result = model(__a , token_type_ids=__a , labels=__a )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase ( self : Optional[Any] , __a : int , __a : Optional[int] , __a : Optional[int] , __a : List[Any] , __a : int , __a : int , __a : Optional[Any] , __a : Optional[int] , __a : Dict , ) -> List[Any]:
"""simple docstring"""
        model = FlaubertForQuestionAnsweringSimple(__a )
        model.to(__a )
        model.eval()
        result = model(__a )
        result = model(__a , start_positions=__a , end_positions=__a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase ( self : Any , __a : Dict , __a : Dict , __a : Any , __a : Tuple , __a : List[str] , __a : Any , __a : List[Any] , __a : Optional[Any] , __a : Dict , ) -> str:
"""simple docstring"""
        model = FlaubertForQuestionAnswering(__a )
        model.to(__a )
        model.eval()
        result = model(__a )
        result_with_labels = model(
            __a , start_positions=__a , end_positions=__a , cls_index=__a , is_impossible=__a , p_mask=__a , )
        result_with_labels = model(
            __a , start_positions=__a , end_positions=__a , cls_index=__a , is_impossible=__a , )
        (total_loss , ) = result_with_labels.to_tuple()
        result_with_labels = model(__a , start_positions=__a , end_positions=__a )
        (total_loss , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def lowerCAmelCase ( self : int , __a : Dict , __a : int , __a : List[str] , __a : str , __a : List[Any] , __a : Union[str, Any] , __a : Any , __a : Optional[int] , __a : Dict , ) -> List[str]:
"""simple docstring"""
        model = FlaubertForSequenceClassification(__a )
        model.to(__a )
        model.eval()
        result = model(__a )
        result = model(__a , labels=__a )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCAmelCase ( self : Dict , __a : Optional[Any] , __a : Optional[Any] , __a : Dict , __a : Tuple , __a : List[Any] , __a : Any , __a : str , __a : int , __a : Any , ) -> str:
"""simple docstring"""
__lowercase : List[str] = self.num_labels
        model = FlaubertForTokenClassification(__a )
        model.to(__a )
        model.eval()
        result = model(__a , attention_mask=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase ( self : Tuple , __a : List[str] , __a : Dict , __a : Tuple , __a : Optional[Any] , __a : Dict , __a : Any , __a : List[str] , __a : Optional[Any] , __a : int , ) -> Any:
"""simple docstring"""
__lowercase : Optional[int] = self.num_choices
        model = FlaubertForMultipleChoice(config=__a )
        model.to(__a )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=__a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCAmelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
"""input_ids""": input_ids,
"""token_type_ids""": token_type_ids,
"""lengths""": input_lengths,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_torch
class lowerCAmelCase ( __a , __a , unittest.TestCase ):
'''simple docstring'''
_A : Optional[int] = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
_A : List[str] = (
{
'''feature-extraction''': FlaubertModel,
'''fill-mask''': FlaubertWithLMHeadModel,
'''question-answering''': FlaubertForQuestionAnsweringSimple,
'''text-classification''': FlaubertForSequenceClassification,
'''token-classification''': FlaubertForTokenClassification,
'''zero-shot''': FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
    def lowerCAmelCase ( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def lowerCAmelCase ( self , inputs_dict , model_class , return_labels=False ):
        """simple docstring"""
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["""start_positions"""] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
                inputs_dict["""end_positions"""] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
def lowerCAmelCase ( self : Any ) -> str:
"""simple docstring"""
        self.model_tester = FlaubertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=FlaubertConfig , emb_dim=37 )
def lowerCAmelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : List[Any] ) -> str:
"""simple docstring"""
__lowercase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*__a )
def lowerCAmelCase ( self : int ) -> Dict:
"""simple docstring"""
__lowercase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*__a )
def lowerCAmelCase ( self : str ) -> Dict:
"""simple docstring"""
__lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*__a )
def lowerCAmelCase ( self : Any ) -> str:
"""simple docstring"""
__lowercase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*__a )
def lowerCAmelCase ( self : List[str] ) -> str:
"""simple docstring"""
__lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*__a )
def lowerCAmelCase ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*__a )
def lowerCAmelCase ( self : int ) -> str:
"""simple docstring"""
__lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*__a )
@slow
def lowerCAmelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase : Any = FlaubertModel.from_pretrained(__a )
self.assertIsNotNone(__a )
@slow
@require_torch_gpu
def lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                continue
            config.torchscript = True
            model = model_class(config=config )
            inputs_dict = self._prepare_for_class(inputs_dict , model_class )
            traced_model = torch.jit.trace(
                model , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model , os.path.join(tmp , """traced_model.pt""" ) )
                loaded = torch.jit.load(os.path.join(tmp , """traced_model.pt""" ) , map_location=torch_device )
                loaded(inputs_dict["""input_ids"""].to(torch_device ) , inputs_dict["""attention_mask"""].to(torch_device ) )
@require_torch
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
__lowercase : List[str] = FlaubertModel.from_pretrained("""flaubert/flaubert_base_cased""" )
__lowercase : Optional[Any] = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
with torch.no_grad():
__lowercase : Dict = model(__a )[0]
__lowercase : Tuple = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , __a )
__lowercase : Optional[int] = torch.tensor(
[[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1E-4 ) )
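# Feature-extraction sketch mirroring the integration test above (assumption:
# the matching tokenizer checkpoint is available on the Hub):
#
#     from transformers import FlaubertTokenizer
#     tokenizer = FlaubertTokenizer.from_pretrained("flaubert/flaubert_base_cased")
#     inputs = tokenizer("Le chat mange une pomme.", return_tensors="pt")
#     last_hidden = model(**inputs)[0]  # (batch, seq_len, 768)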
from __future__ import annotations
def snake_case_ ( nums : list[int] ) -> int:
    """Return the maximum sum of non-adjacent elements of ``nums``.

    >>> snake_case_([3, 2, 7, 10])
    13
    >>> snake_case_([])
    0
    """
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including , max_excluding = (
            max_excluding + num,
            max(max_including , max_excluding ),
        )
    return max(max_including , max_excluding )
if __name__ == "__main__":
import doctest
doctest.testmod()
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_xlm''': ['''XLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMConfig''', '''XLMOnnxConfig'''],
'''tokenization_xlm''': ['''XLMTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_xlm"""] = [
'''XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMForMultipleChoice''',
'''XLMForQuestionAnswering''',
'''XLMForQuestionAnsweringSimple''',
'''XLMForSequenceClassification''',
'''XLMForTokenClassification''',
'''XLMModel''',
'''XLMPreTrainedModel''',
'''XLMWithLMHeadModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_xlm"""] = [
'''TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMForMultipleChoice''',
'''TFXLMForQuestionAnsweringSimple''',
'''TFXLMForSequenceClassification''',
'''TFXLMForTokenClassification''',
'''TFXLMMainLayer''',
'''TFXLMModel''',
'''TFXLMPreTrainedModel''',
'''TFXLMWithLMHeadModel''',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
lowerCamelCase : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
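# Net effect of _LazyModule: importing the package is cheap, and the heavy
# framework-specific modules load only on first attribute access; a sketch:
#
#     import transformers.models.xlm as xlm   # fast, nothing heavy imported yet
#     model_cls = xlm.XLMModel                # modeling_xlm imported lazily here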
lowerCamelCase : List[str] = '''0.18.2'''
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
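# The try/except blocks above substitute dummy objects when an optional
# dependency is missing, so failures surface at use time rather than at import
# time; a sketch of the resulting behaviour:
#
#     from diffusers import DiffusionPipeline            # always importable
#     pipe = DiffusionPipeline.from_pretrained("...")    # raises only now if a
#                                                        # required extra is absent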
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def snake_case_ ( number : int ) -> int:
    # Möbius function: 0 if the number is not square-free, otherwise
    # (-1) ** (number of distinct prime factors)
    factors = prime_factors(number )
    if is_square_free(factors ):
        return -1 if len(factors ) % 2 else 1
    return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
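# Möbius values for a few inputs (assumption: prime_factors returns the
# factors with multiplicity, as in the maths module imported above):
#
#     snake_case_(1)   # ->  1  (no prime factors, even count)
#     snake_case_(2)   # -> -1  (one distinct prime factor)
#     snake_case_(4)   # ->  0  (divisible by the square 2**2)
#     snake_case_(6)   # ->  1  (two distinct prime factors)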
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase : List[Any] = logging.get_logger(__name__)
def create_rename_keys( config , has_lm_head=False , is_semantic=False ):
    prefix = """backbone.""" if is_semantic else """"""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"{prefix}blocks.{i}.norm1.weight", F"beit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"{prefix}blocks.{i}.norm1.bias", F"beit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"{prefix}blocks.{i}.attn.proj.weight", F"beit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(F"{prefix}blocks.{i}.attn.proj.bias", F"beit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"{prefix}blocks.{i}.norm2.weight", F"beit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"{prefix}blocks.{i}.norm2.bias", F"beit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc1.weight", F"beit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc1.bias", F"beit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc2.weight", F"beit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc2.bias", F"beit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
(F"{prefix}cls_token", """beit.embeddings.cls_token"""),
(F"{prefix}patch_embed.proj.weight", """beit.embeddings.patch_embeddings.projection.weight"""),
(F"{prefix}patch_embed.proj.bias", """beit.embeddings.patch_embeddings.projection.bias"""),
(F"{prefix}pos_embed", """beit.embeddings.position_embeddings"""),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("""mask_token""", """beit.embeddings.mask_token"""),
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("""fc_norm.weight""", """beit.pooler.layernorm.weight"""),
("""fc_norm.bias""", """beit.pooler.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v( state_dict , config , has_lm_head=False , is_semantic=False ):
    for i in range(config.num_hidden_layers ):
        prefix = """backbone.""" if is_semantic else """"""
# queries, keys and values
        in_proj_weight = state_dict.pop(F"{prefix}blocks.{i}.attn.qkv.weight" )
        q_bias = state_dict.pop(F"{prefix}blocks.{i}.attn.q_bias" )
        v_bias = state_dict.pop(F"{prefix}blocks.{i}.attn.v_bias" )
        state_dict[F"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
        state_dict[F"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias
        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(F"{prefix}blocks.{i}.gamma_1" )
        gamma_2 = state_dict.pop(F"{prefix}blocks.{i}.gamma_2" )
        state_dict[F"beit.encoder.layer.{i}.lambda_1"] = gamma_1
        state_dict[F"beit.encoder.layer.{i}.lambda_2"] = gamma_2
def rename_key( dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def prepare_img( ):
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_dit_checkpoint( checkpoint_url , pytorch_dump_folder_path , push_to_hub=False ):
    has_lm_head = False if """rvlcdip""" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True , use_mask_token=has_lm_head )
    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = """huggingface/label-files"""
        filename = """rvlcdip-id2label.json"""
        idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
        idalabel = {int(k ): v for k, v in idalabel.items()}
        config.idalabel = idalabel
        config.labelaid = {v: k for k, v in idalabel.items()}
    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="""cpu""" )["""model"""]
    rename_keys = create_rename_keys(config , has_lm_head=has_lm_head )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , has_lm_head=has_lm_head )
    # load HuggingFace model
    model = BeitForMaskedImageModeling(config ) if has_lm_head else BeitForImageClassification(config )
    model.eval()
    model.load_state_dict(state_dict )
    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=False )
    image = prepare_img()
    encoding = image_processor(images=image , return_tensors="""pt""" )
    pixel_values = encoding["""pixel_values"""]
    outputs = model(pixel_values )
    logits = outputs.logits
    # verify logits
    expected_shape = [1, 16] if """rvlcdip""" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape ), "Shape of logits not as expected"
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F"Saving model to {pytorch_dump_folder_path}" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F"Saving image processor to {pytorch_dump_folder_path}" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        if has_lm_head:
            model_name = """dit-base""" if """base""" in checkpoint_url else """dit-large"""
        else:
            model_name = """dit-base-finetuned-rvlcdip""" if """dit-b""" in checkpoint_url else """dit-large-finetuned-rvlcdip"""
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=True , )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=True , )
if __name__ == "__main__":
lowerCamelCase : List[str] = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
lowerCamelCase : List[str] = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
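# Typical invocation (illustrative; assumes this script is saved as
# convert_dit_checkpoint.py):
#
#     python convert_dit_checkpoint.py \
#         --checkpoint_url https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth \
#         --pytorch_dump_folder_path ./dit-base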