def heaps(arr: list) -> list:
    """
    Pure Python implementation of the recursive Heap's algorithm,
    returning all permutations of a list.
    """
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return

        generate(k - 1, arr)

        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
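# A quick check of the fixed implementation (illustrative, not part of the
# original file). Heap's algorithm yields all n! permutations exactly once;
# note that the swap must be a single tuple assignment (as above), otherwise
# one of the two values is lost, which was the bug in the original:
#
#     >>> heaps([1, 2, 3])
#     [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]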
# ---------------------------------------------------------------------------
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ : int = logging.get_logger(__name__)
A__ : List[str] = {
'google/pix2struct-textcaps-base': (
'https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json'
),
}
class lowercase__ ( snake_case__ ):
_UpperCAmelCase :List[Any] = "pix2struct_text_model"
_UpperCAmelCase :str = ["past_key_values"]
_UpperCAmelCase :str = {
"hidden_size": "hidden_size",
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self : Dict , snake_case__ : Any=5_0244 , snake_case__ : Optional[int]=768 , snake_case__ : Dict=64 , snake_case__ : List[str]=2048 , snake_case__ : Dict=12 , snake_case__ : Any=12 , snake_case__ : Dict=32 , snake_case__ : int=128 , snake_case__ : List[str]=0.1 , snake_case__ : Optional[int]=1E-6 , snake_case__ : Any=1.0 , snake_case__ : int="gelu_new" , snake_case__ : Optional[Any]=0 , snake_case__ : Any=False , snake_case__ : Any=0 , snake_case__ : Any=1 , snake_case__ : Optional[int]=False , snake_case__ : Tuple=True , **snake_case__ : Any , ):
lowerCamelCase_ : List[str] =vocab_size
lowerCamelCase_ : Tuple =hidden_size
lowerCamelCase_ : Optional[int] =d_kv
lowerCamelCase_ : List[Any] =d_ff
lowerCamelCase_ : Tuple =num_layers
lowerCamelCase_ : Optional[int] =num_heads
lowerCamelCase_ : Any =relative_attention_num_buckets
lowerCamelCase_ : Optional[int] =relative_attention_max_distance
lowerCamelCase_ : List[Any] =dropout_rate
lowerCamelCase_ : str =layer_norm_epsilon
lowerCamelCase_ : int =initializer_factor
lowerCamelCase_ : str =use_cache
lowerCamelCase_ : int =eos_token_id
lowerCamelCase_ : Optional[Any] =decoder_start_token_id
# for backwards compatibility
lowerCamelCase_ : Optional[Any] =dense_act_fn
super().__init__(
pad_token_id=snake_case__ , eos_token_id=snake_case__ , decoder_start_token_id=snake_case__ , tie_word_embeddings=snake_case__ , is_decoder=snake_case__ , **snake_case__ , )
@classmethod
def UpperCAmelCase__ ( cls : Tuple , snake_case__ : Union[str, os.PathLike] , **snake_case__ : str ):
cls._set_token_in_kwargs(snake_case__ )
lowerCamelCase_ , lowerCamelCase_ : Any =cls.get_config_dict(snake_case__ , **snake_case__ )
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get("model_type" ) == "pix2struct":
lowerCamelCase_ : List[Any] =config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(snake_case__ , **snake_case__ )
class lowercase__ ( snake_case__ ):
_UpperCAmelCase :List[Any] = "pix2struct_vision_model"
def __init__( self : Optional[int] , snake_case__ : Tuple=768 , snake_case__ : str=768 , snake_case__ : Union[str, Any]=2048 , snake_case__ : Tuple=64 , snake_case__ : List[Any]=12 , snake_case__ : Dict=12 , snake_case__ : int="gelu_new" , snake_case__ : str=1E-6 , snake_case__ : int=0.0 , snake_case__ : int=0.0 , snake_case__ : Dict=1E-10 , snake_case__ : Tuple=1.0 , snake_case__ : int=4096 , snake_case__ : Tuple=32 , snake_case__ : List[str]=128 , **snake_case__ : List[Any] , ):
super().__init__(**snake_case__ )
lowerCamelCase_ : int =hidden_size
lowerCamelCase_ : List[Any] =patch_embed_hidden_size
lowerCamelCase_ : Tuple =d_ff
lowerCamelCase_ : List[Any] =dropout_rate
lowerCamelCase_ : Dict =num_hidden_layers
lowerCamelCase_ : List[str] =num_attention_heads
lowerCamelCase_ : Optional[Any] =initializer_range
lowerCamelCase_ : int =initializer_factor
lowerCamelCase_ : Any =attention_dropout
lowerCamelCase_ : List[str] =layer_norm_eps
lowerCamelCase_ : int =dense_act_fn
lowerCamelCase_ : Optional[Any] =seq_len
lowerCamelCase_ : Optional[int] =relative_attention_num_buckets
lowerCamelCase_ : Optional[int] =relative_attention_max_distance
lowerCamelCase_ : Dict =d_kv
@classmethod
def UpperCAmelCase__ ( cls : Optional[Any] , snake_case__ : Union[str, os.PathLike] , **snake_case__ : Dict ):
cls._set_token_in_kwargs(snake_case__ )
lowerCamelCase_ , lowerCamelCase_ : Dict =cls.get_config_dict(snake_case__ , **snake_case__ )
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get("model_type" ) == "pix2struct":
lowerCamelCase_ : List[Any] =config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(snake_case__ , **snake_case__ )
class lowercase__ ( snake_case__ ):
_UpperCAmelCase :str = "pix2struct"
_UpperCAmelCase :List[str] = True
def __init__( self : Tuple , snake_case__ : List[Any]=None , snake_case__ : Optional[Any]=None , snake_case__ : List[str]=1.0 , snake_case__ : List[Any]=0.02 , snake_case__ : List[Any]=False , snake_case__ : int=False , snake_case__ : Any=True , **snake_case__ : List[Any] , ):
super().__init__(tie_word_embeddings=snake_case__ , is_encoder_decoder=snake_case__ , **snake_case__ )
if text_config is None:
lowerCamelCase_ : Dict ={}
logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values." )
if vision_config is None:
lowerCamelCase_ : int ={}
logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values." )
lowerCamelCase_ : Any =PixaStructTextConfig(**snake_case__ )
lowerCamelCase_ : Optional[Any] =PixaStructVisionConfig(**snake_case__ )
lowerCamelCase_ : str =self.text_config.decoder_start_token_id
lowerCamelCase_ : Optional[int] =self.text_config.pad_token_id
lowerCamelCase_ : List[Any] =self.text_config.eos_token_id
lowerCamelCase_ : int =initializer_factor
lowerCamelCase_ : Optional[Any] =initializer_range
lowerCamelCase_ : Any =self.initializer_range
lowerCamelCase_ : List[Any] =self.initializer_range
lowerCamelCase_ : List[str] =is_vqa
@classmethod
def UpperCAmelCase__ ( cls : List[str] , snake_case__ : PixaStructTextConfig , snake_case__ : PixaStructVisionConfig , **snake_case__ : Optional[int] ):
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **snake_case__ )
def UpperCAmelCase__ ( self : Any ):
lowerCamelCase_ : Tuple =copy.deepcopy(self.__dict__ )
lowerCamelCase_ : Dict =self.text_config.to_dict()
lowerCamelCase_ : Union[str, Any] =self.vision_config.to_dict()
lowerCamelCase_ : Dict =self.__class__.model_type
return output
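# Usage sketch (illustrative addition, not part of the original module). The
# composite config wires the two sub-configs together and copies the special
# token ids from the text side:
#
#     text_config = Pix2StructTextConfig(num_layers=4)
#     vision_config = Pix2StructVisionConfig(num_hidden_layers=4)
#     config = Pix2StructConfig.from_text_vision_configs(text_config, vision_config)
#     assert config.decoder_start_token_id == text_config.decoder_start_token_id
#     assert config.to_dict()["model_type"] == "pix2struct"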
# ---------------------------------------------------------------------------
"""Autoformer model configuration"""
from typing import List, Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/autoformer-tourism-monthly": "https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json",
}


class AutoformerConfig(PretrainedConfig):
    model_type = "autoformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: bool = True,
        num_time_features: int = 0,
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        activation_function: str = "gelu",
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache: bool = True,
        is_encoder_decoder=True,
        # Autoformer arguments
        label_length: int = 10,
        moving_average: int = 25,
        autocorrelation_factor: int = 3,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length if context_length is not None else prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Autoformer
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
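# Usage sketch (illustrative). With the defaults above, input_size=1, a
# lags_sequence of length 7, and _number_of_features contributing
# 0 + 0 + 0 + 0 + 2 (the loc/scale features), so feature_size = 1 * 7 + 2 = 9:
#
#     config = AutoformerConfig(prediction_length=24)
#     assert config.context_length == 24          # falls back to prediction_length
#     assert config.feature_size == 9
#     assert config.hidden_size == config.d_model == 64  # via attribute_map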
# ---------------------------------------------------------------------------
import json
import os
import shutil
import tempfile
import unittest

import numpy as np

from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor


@require_tokenizers
@require_vision
class VisionTextDualEncoderProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.5, 0.5, 0.5],
            "image_std": [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a list of PIL images from random numpy arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with self.assertRaises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
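# The suite above runs directly under pytest; the module path shown is its
# usual location in the transformers repository (an assumption, versions vary):
#
#     python -m pytest tests/models/vision_text_dual_encoder/test_processor_vision_text_dual_encoder.py -q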
# ---------------------------------------------------------------------------
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
[
("audio-spectrogram-transformer", "ASTFeatureExtractor"),
("beit", "BeitFeatureExtractor"),
("chinese_clip", "ChineseCLIPFeatureExtractor"),
("clap", "ClapFeatureExtractor"),
("clip", "CLIPFeatureExtractor"),
("clipseg", "ViTFeatureExtractor"),
("conditional_detr", "ConditionalDetrFeatureExtractor"),
("convnext", "ConvNextFeatureExtractor"),
("cvt", "ConvNextFeatureExtractor"),
("data2vec-audio", "Wav2Vec2FeatureExtractor"),
("data2vec-vision", "BeitFeatureExtractor"),
("deformable_detr", "DeformableDetrFeatureExtractor"),
("deit", "DeiTFeatureExtractor"),
("detr", "DetrFeatureExtractor"),
("dinat", "ViTFeatureExtractor"),
("donut-swin", "DonutFeatureExtractor"),
("dpt", "DPTFeatureExtractor"),
("encodec", "EncodecFeatureExtractor"),
("flava", "FlavaFeatureExtractor"),
("glpn", "GLPNFeatureExtractor"),
("groupvit", "CLIPFeatureExtractor"),
("hubert", "Wav2Vec2FeatureExtractor"),
("imagegpt", "ImageGPTFeatureExtractor"),
("layoutlmv2", "LayoutLMv2FeatureExtractor"),
("layoutlmv3", "LayoutLMv3FeatureExtractor"),
("levit", "LevitFeatureExtractor"),
("maskformer", "MaskFormerFeatureExtractor"),
("mctct", "MCTCTFeatureExtractor"),
("mobilenet_v1", "MobileNetV1FeatureExtractor"),
("mobilenet_v2", "MobileNetV2FeatureExtractor"),
("mobilevit", "MobileViTFeatureExtractor"),
("nat", "ViTFeatureExtractor"),
("owlvit", "OwlViTFeatureExtractor"),
("perceiver", "PerceiverFeatureExtractor"),
("poolformer", "PoolFormerFeatureExtractor"),
("regnet", "ConvNextFeatureExtractor"),
("resnet", "ConvNextFeatureExtractor"),
("segformer", "SegformerFeatureExtractor"),
("sew", "Wav2Vec2FeatureExtractor"),
("sew-d", "Wav2Vec2FeatureExtractor"),
("speech_to_text", "Speech2TextFeatureExtractor"),
("speecht5", "SpeechT5FeatureExtractor"),
("swiftformer", "ViTFeatureExtractor"),
("swin", "ViTFeatureExtractor"),
("swinv2", "ViTFeatureExtractor"),
("table-transformer", "DetrFeatureExtractor"),
("timesformer", "VideoMAEFeatureExtractor"),
("tvlt", "TvltFeatureExtractor"),
("unispeech", "Wav2Vec2FeatureExtractor"),
("unispeech-sat", "Wav2Vec2FeatureExtractor"),
("van", "ConvNextFeatureExtractor"),
("videomae", "VideoMAEFeatureExtractor"),
("vilt", "ViltFeatureExtractor"),
("vit", "ViTFeatureExtractor"),
("vit_mae", "ViTFeatureExtractor"),
("vit_msn", "ViTFeatureExtractor"),
("wav2vec2", "Wav2Vec2FeatureExtractor"),
("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"),
("wavlm", "Wav2Vec2FeatureExtractor"),
("whisper", "WhisperFeatureExtractor"),
("xclip", "CLIPFeatureExtractor"),
("yolos", "YolosFeatureExtractor"),
]
)
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name: str):
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the
    # main init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_feature_extractor_config(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        FEATURE_EXTRACTOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoFeatureExtractor:
    r"""
    This is a generic feature extractor class that will be instantiated as one of the feature extractor classes of
    the library when created with the [`AutoFeatureExtractor.from_pretrained`] class method.

    This class cannot be instantiated directly using `__init__()` (throws an error).
    """

    def __init__(self):
        raise EnvironmentError(
            "AutoFeatureExtractor is designed to be instantiated "
            "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get("feature_extractor_type", None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type`
            feature_extractor_class = getattr(config, "feature_extractor_type", None)
            if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
            f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, feature_extractor_class):
        """Register a new feature extractor for this class."""
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
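# Usage sketch (illustrative). from_pretrained resolves the concrete class
# from the checkpoint's feature extractor config or, failing that, from the
# model config's model_type via FEATURE_EXTRACTOR_MAPPING:
#
#     from transformers import AutoFeatureExtractor
#
#     extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
#     # -> a Wav2Vec2FeatureExtractor, per the ("wav2vec2", "Wav2Vec2FeatureExtractor")
#     #    entry in FEATURE_EXTRACTOR_MAPPING_NAMES above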
# ---------------------------------------------------------------------------
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class PoolFormerImageProcessor(BaseImageProcessor):
    r"""
    Constructs a PoolFormer image processor.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: int = 0.9,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        rescale_factor: Union[int, float] = 1 / 255,
        do_rescale: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.crop_pct = crop_pct
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: Optional[float] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size and ("height" not in size or "width" not in size):
            raise ValueError(f"size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")

        if crop_pct is not None:
            if "shortest_edge" in size:
                scale_size = int(size["shortest_edge"] / crop_pct)
            elif "height" in size and "width" in size:
                if size["height"] == size["width"]:
                    scale_size = int(size["height"] / crop_pct)
                else:
                    scale_size = (int(size["height"] / crop_pct), int(size["width"] / crop_pct))
            else:
                raise ValueError("Invalid size for resize: {}".format(size))

            output_size = get_resize_output_image_size(image, size=scale_size, default_to_square=False)
        else:
            if "shortest_edge" in size:
                output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            elif "height" in size and "width" in size:
                output_size = (size["height"], size["width"])
            else:
                raise ValueError("Invalid size for resize: {}".format(size))

        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"size must contain 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: int = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_pct is None:
            raise ValueError("Crop_pct must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
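# Usage sketch (illustrative). The crop_pct trick mirrors the common
# "resize to size/crop_pct, then center-crop back to size" evaluation recipe:
# with size={"shortest_edge": 224} and crop_pct=0.9, images are resized so the
# short side is int(224 / 0.9) = 248, then cropped to 224 x 224:
#
#     import numpy as np
#
#     processor = PoolFormerImageProcessor()  # the class defined above
#     image = np.random.randint(0, 256, size=(3, 300, 500), dtype=np.uint8)
#     batch = processor(images=image, return_tensors="np")
#     print(batch["pixel_values"].shape)  # (1, 3, 224, 224)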
# ---------------------------------------------------------------------------
import json
import os
import unittest
from transformers import AutoTokenizer, GPT2Tokenizer, GPT2TokenizerFast
from transformers.models.gpt2.tokenization_gpt2 import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPT2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPT2Tokenizer
    rust_tokenizer_class = GPT2TokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
            "<|endoftext|>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPT2Tokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPT2TokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = GPT2Tokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)

        sequence = "lower newer"

        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_pretokenized_inputs(self, *args, **kwargs):
        # It's very difficult to mix/test pretokenization with byte-level encodings,
        # so this test is skipped for GPT-2.
        pass
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )
    def test_padding_if_pad_token_set_slow(self):
        tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")

        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]

        pad_token_id = tokenizer.pad_token_id

        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors="np")

        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])

        # s2
        # test automatic padding
        self.assertEqual(out_s2["input_ids"].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0])
        self.assertFalse(0 in out_s2["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["input_ids"][1])
        self.assertTrue(0 in out_s2["attention_mask"][1])

        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])

        # p2
        # test automatic padding pair
        self.assertEqual(out_p2["input_ids"].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0])
        self.assertFalse(0 in out_p2["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["input_ids"][1])
        self.assertTrue(0 in out_p2["attention_mask"][1])
    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)

        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]

        bos_token_id = tokenizer.bos_token_id

        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)

        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))

        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)

        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2))

    # tokenizer has no padding token
    def test_padding_different_model_input_name(self):
        pass

    def test_special_tokens_mask_input_pairs_and_bos_token(self):
        tokenizers = [self.get_tokenizer(do_lower_case=False, add_bos_token=True)]
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                sequence_0 = "Encode this."
                sequence_1 = "This one too please."
                encoded_sequence = tokenizer.encode(sequence_0, add_special_tokens=False)
                encoded_sequence += tokenizer.encode(sequence_1, add_special_tokens=False)
                encoded_sequence_dict = tokenizer.encode_plus(
                    sequence_0,
                    sequence_1,
                    add_special_tokens=True,
                    return_special_tokens_mask=True,
                )
                encoded_sequence_w_special = encoded_sequence_dict["input_ids"]
                special_tokens_mask = encoded_sequence_dict["special_tokens_mask"]
                self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special))

                filtered_sequence = [
                    (x if not special_tokens_mask[i] else None) for i, x in enumerate(encoded_sequence_w_special)
                ]
                filtered_sequence = [x for x in filtered_sequence if x is not None]
                self.assertEqual(encoded_sequence, filtered_sequence)
@require_tokenizers
class OPTTokenizationTest(unittest.TestCase):
    def test_serialize_deserialize_fast_opt(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)
        text = "A photo of a cat"

        tokens_ids = tokenizer.encode(
            text,
        )
        self.assertEqual(tokens_ids, [2, 250, 1345, 9, 10, 4758])
        tokenizer.save_pretrained("test_opt")

        tokenizer = AutoTokenizer.from_pretrained("./test_opt")
        tokens_ids = tokenizer.encode(
            text,
        )
        self.assertEqual(tokens_ids, [2, 250, 1345, 9, 10, 4758])

    def test_fast_slow_equivalence(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", use_slow=True)
        text = "A photo of a cat"

        tokens_ids = tokenizer.encode(
            text,
        )
        # Same as above
        self.assertEqual(tokens_ids, [2, 250, 1345, 9, 10, 4758])

    @unittest.skip("This test is failing because of a bug in the fast tokenizer")
    def test_users_can_modify_bos(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)

        tokenizer.bos_token = "bos"
        tokenizer.bos_token_id = tokenizer.get_vocab()["bos"]

        text = "A photo of a cat"
        tokens_ids = tokenizer.encode(
            text,
        )
        # We changed the bos token
        self.assertEqual(tokens_ids, [31957, 250, 1345, 9, 10, 4758])
        tokenizer.save_pretrained("./tok")
        tokenizer = AutoTokenizer.from_pretrained("./tok")
        self.assertTrue(tokenizer.is_fast)
        tokens_ids = tokenizer.encode(
            text,
        )
        self.assertEqual(tokens_ids, [31957, 250, 1345, 9, 10, 4758])
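# Illustrative note: `add_prefix_space=True` makes "lower" at the start of a
# sequence tokenize like " lower" elsewhere, which the byte-level BPE tests
# above rely on. A sketch against the toy vocab built in setUp():
#
#     tokenizer = GPT2Tokenizer(vocab_file, merges_file, unk_token="<unk>")
#     tokenizer.tokenize("lower newer", add_prefix_space=True)
#     # -> ['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er']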
# ---------------------------------------------------------------------------
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/pegasus-large")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "</s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "</s>")
        self.assertEqual(vocab_keys[-1], "v")
        self.assertEqual(len(vocab_keys), 1103)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1103)

    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"
            " </s> <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)
    def test_large_mask_tokens(self):
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = "<mask_1> To ensure a <mask_2> flow of bank resolutions."
        desired_result = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)

    def test_pegasus_large_tokenizer_settings(self):
        tokenizer = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 96103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1024
        raw_input_str = "To ensure a smooth flow of bank resolutions."
        desired_result = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3]) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]

    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 150, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 1024)
        assert batch.attention_mask.shape == (2, 1024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[38979, 143, 18485, 606, 130, 26669, 87686, 121, 54189, 1129, 111, 26669, 87686, 121, 9114, 14787, 121, 13249, 158, 592, 956, 121, 14621, 31576, 143, 62613, 108, 9688, 930, 43430, 11562, 62613, 304, 108, 11443, 897, 108, 9314, 17415, 63399, 108, 11443, 7614, 18316, 118, 4284, 7148, 12430, 143, 1400, 25703, 158, 111, 4284, 7148, 11772, 143, 21297, 1064, 158, 122, 204, 3506, 1754, 1133, 14787, 1581, 115, 33224, 4482, 111, 1355, 110, 29173, 317, 50833, 108, 20147, 94665, 111, 77198, 107, 1], [110, 62613, 117, 638, 112, 1133, 121, 20098, 1355, 79050, 13872, 135, 1596, 53541, 1352, 141, 13039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 18289, 17780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/bigbird-pegasus-large-arxiv",
            revision="ba85d0851d708441f91440d509690f1ab6353415",
        )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token="[MASK]")
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"
            " <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)
    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 1000, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 4096)
        assert batch.attention_mask.shape == (2, 4096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.

    def test_equivalence_to_orig_tokenizer(self):
        test_str = (
            "This is an example string that is used to test the original TF implementation against the HF"
            " implementation"
        )

        token_ids = self._large_tokenizer(test_str).input_ids

        self.assertListEqual(
            token_ids,
            [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1],
        )
import unittest
from transformers import (
    MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    Text2TextGenerationPipeline,
    pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class Text2TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        generator = Text2TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return generator, ["Something to write", "Something else"]
    def run_pipeline_test(self, generator, _):
        outputs = generator("Something there")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["generated_text"].startswith("Something there"))
        outputs = generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )
        outputs = generator(
            ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )
        with self.assertRaises(ValueError):
            generator(4)
    @require_torch
    def test_small_model_pt(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="pt")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])
        num_return_sequences = 3
        outputs = generator(
            "Something there",
            num_return_sequences=num_return_sequences,
            num_beams=num_return_sequences,
        )
        target_outputs = [
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": ""},
        ]
        self.assertEqual(outputs, target_outputs)
        outputs = generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(torch.Tensor)},
                {"generated_token_ids": ANY(torch.Tensor)},
            ],
        )
        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = "<pad>"
        outputs = generator(
            ["This is a test", "This is a second test"], do_sample=True, num_return_sequences=2, batch_size=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
            ],
        )
    @require_tf
    def test_small_model_tf(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="tf")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])
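# Minimal usage sketch of the pipeline under test (added for illustration; mirrors
# the tiny random T5 checkpoint exercised above, any seq2seq model behaves the same):
#
#   generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random")
#   generator("Something there", do_sample=False)      # -> [{"generated_text": "..."}]
#   generator("Something there", return_tensors=True)  # -> [{"generated_token_ids": tensor}]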
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json""",
"""allenai/longformer-large-4096""": (
"""https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"""
),
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt""",
"""allenai/longformer-large-4096""": (
"""https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"""
),
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""allenai/longformer-base-4096""": 4096,
"""allenai/longformer-large-4096""": 4096,
"""allenai/longformer-large-4096-finetuned-triviaqa""": 4096,
"""allenai/longformer-base-4096-extra.pos.embd.only""": 4096,
"""allenai/longformer-large-4096-extra.pos.embd.only""": 4096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    # Build the list of "safe" printable byte values, then map every remaining
    # byte value to an unused unicode codepoint above 255.
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
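# Worked example (added for illustration; not in the original source):
#   >>> sorted(get_pairs(("l", "o", "w", "e", "r")))
#   [('e', 'r'), ('l', 'o'), ('o', 'w'), ('w', 'e')]
# bytes_to_unicode() returns a 256-entry dict mapping every byte value to a
# printable unicode character, so arbitrary UTF-8 bytes survive BPE unchanged.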
class LongformerTokenizer(PreTrainedTokenizer):
    """Byte-level BPE tokenizer for Longformer (same scheme as RoBERTa/GPT-2)."""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, merges_file, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, **kwargs):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token,
            cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space,
            **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
    @property
    def vocab_size(self):
        return len(self.encoder)
    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            # Merge the lowest-ranked (most frequent) pair first.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8"))  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens
    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))
    def _convert_id_to_token(self, index):
        return self.decoder.get(index)
    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!")
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=already_has_special_tokens)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
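# Hedged usage sketch (added for illustration; any checkpoint in the map above works):
#
#   tokenizer = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096")
#   ids = tokenizer("Hello world").input_ids
#   # build_inputs_with_special_tokens wraps the BPE ids as <s> ... </s>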
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
logger = logging.get_logger(__name__)
@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]
    def __init__(self, **kwargs):
        # Translate deprecated --no_* flags into their positive counterparts.
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}")
        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)
    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fp16_opt_level: str = field(
        default="O1",
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        },
    )
    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu
    @property
    def is_tpu(self) -> bool:
        return is_torch_tpu_available() and self.tpu
    @property
    def device_idx(self) -> int:
        requires_backends(self, ["torch"])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()
    @property
    def device(self) -> "torch.device":
        requires_backends(self, ["torch"])
        return self._setup_devices[0]
    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["torch"])
        return self._setup_devices[1]
    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
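# Hedged usage sketch (added for illustration; the `models` kwarg is an assumption
# about the parent BenchmarkArguments dataclass):
#
#   args = PyTorchBenchmarkArguments(models=["bert-base-uncased"], no_cuda=True)
#   args.cuda    # -> False, set by the deprecated-flag translation in __init__
#   args.device  # -> torch.device("cpu"), resolved lazily by _setup_devices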
"""simple docstring"""
from math import factorial, radians
def sin(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10) -> float:
    # Simplify the angle to lie within one full turn.
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)
    result = angle_in_radians
    a = 3
    b = -1
    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)
        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.
    return round(result, rounded_values_count)
if __name__ == "__main__":
    import doctest
    doctest.testmod()
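# Worked checks (added for illustration): the alternating-sign Taylor series above
# converges quickly for small angles, e.g.
#   >>> sin(90.0)
#   1.0
#   >>> sin(30.0)
#   0.5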
"""simple docstring"""
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, images: ImageInput = None, text=None, add_special_tokens: bool = True, padding=False, truncation=None, max_length=None, stride: int = 0, pad_to_multiple_of=None, return_attention_mask=None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors=None, **kwargs) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")
        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs)
            return text_encoding
        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs)
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)
        return encoding_image_processor
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
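# Hedged usage sketch (added for illustration; the checkpoint id is an assumption):
#
#   from transformers import BlipProcessor
#   processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
#   inputs = processor(images=image, text="a photo of", return_tensors="pt")
#   # -> pixel_values from the image processor plus input_ids/attention_mask from the tokenizer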
'''simple docstring'''
def base16_encode(data: bytes) -> str:
    # Turn each byte into its uppercase two-digit hex representation.
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])
def base16_decode(data: str) -> bytes:
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits.")
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters.")
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
if __name__ == "__main__":
import doctest
doctest.testmod()
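# Worked examples (added for illustration):
#   >>> base16_encode(b"Hello World!")
#   '48656C6C6F20576F726C6421'
#   >>> base16_decode("48656C6C6F20576F726C6421")
#   b'Hello World!'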
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))
def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list:
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)
    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")
    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)
    answer = []
    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist])
    return answer
def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
if __name__ == "__main__":
import doctest
doctest.testmod()
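# Worked example (added for illustration): each query row yields the nearest
# dataset vector together with its euclidean distance.
#   >>> dataset = np.array([[0, 0, 0], [1, 1, 1], [2, 2, 2]])
#   >>> value_array = np.array([[0, 0, 1]])
#   >>> similarity_search(dataset, value_array)
#   [[[0, 0, 0], 1.0]]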
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Pix2StructProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "Pix2StructImageProcessor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")
    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
    def __call__(self, images=None, text=None, add_special_tokens=True, padding=False, truncation=None, max_length=None, max_patches=2048, stride=0, pad_to_multiple_of=None, return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False, return_token_type_ids=False, return_length=False, verbose=True, return_tensors=None, **kwargs) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")
        # Get only text
        if images is None and not self.image_processor.is_vqa:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs)
            return text_encoding
        if not self.image_processor.is_vqa:
            # add pixel_values
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, **kwargs)
        else:
            # add pixel_values and bbox
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, header_text=text, **kwargs)
        if text is not None and not self.image_processor.is_vqa:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs)
            if "attention_mask" in text_encoding:
                text_encoding["decoder_attention_mask"] = text_encoding.pop("attention_mask")
            if "input_ids" in text_encoding:
                text_encoding["decoder_input_ids"] = text_encoding.pop("input_ids")
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)
        return encoding_image_processor
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
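# Hedged usage sketch (added for illustration; the checkpoint id is an assumption):
#
#   from transformers import Pix2StructProcessor
#   processor = Pix2StructProcessor.from_pretrained("google/pix2struct-textcaps-base")
#   inputs = processor(images=image, text="caption", return_tensors="pt", max_patches=2048)
#   # For non-VQA checkpoints the text becomes decoder_input_ids/decoder_attention_mask,
#   # as renamed in __call__ above; VQA checkpoints render the question into the image.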
def sylvester(number: int) -> int:
    """Calculates the nth number in Sylvester's sequence."""
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"
    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1
if __name__ == "__main__":
print(F'The 8th number in Sylvester\'s sequence: {sylvester(8)}')
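# Worked values (added for illustration): sylvester(1)=2, sylvester(2)=3, sylvester(3)=7,
# sylvester(4)=43, sylvester(5)=1807; each term equals the product of all previous
# terms plus one, which is what `lower * upper + 1` computes above.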
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class VisionTextDualEncoderProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        image_processor_map = {
            "do_resize": True,
            "size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.5, 0.5, 0.5],
            "image_std": [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)
    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def prepare_image_inputs(self):
        """Prepares a list of PIL images from random numpy arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])
        # test if it raises when no input is passed
        with self.assertRaises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = "▁"
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class BigBirdTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BigBirdTokenizer
    rust_tokenizer_class = BigBirdTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        tokenizer = self.tokenizer_class(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "[MASK]")
        self.assertEqual(len(vocab_keys), 1004)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        tokenizer = BigBirdTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382])
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4])
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
    @cached_property
    def big_tokenizer(self):
        return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [65, 18536, 2260, 101, 66]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66]  # noqa: E231
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch
        from transformers import BigBirdConfig, BigBirdModel
        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False)
        config = BigBirdConfig(attention_type="original_full")
        model = BigBirdModel(config)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
    @slow
    def test_special_tokens(self):
        tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
        decoded_text = tokenizer.decode(tokenizer("Paris is the [MASK].").input_ids)
        self.assertTrue(decoded_text == "[CLS] Paris is the[MASK].[SEP]")
@slow
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : List[Any] = {"input_ids": [[65, 39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114, 66], [65, 448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_UpperCamelCase , model_name="google/bigbird-roberta-base" , revision="215c99f1600e06f83acce68422f2035b2b5c3510" , )
'''simple docstring'''
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none.")
        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]
            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []
                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])
                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))
                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")
            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp
                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "pt" and is_torch_available():
                import torch
                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf
                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)
            else:
                raise ValueError("Target return tensor type could not be returned")
            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask
        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs).pixel_values
            encoding["query_pixel_values"] = query_pixel_values
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)
    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)
    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
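# Hedged usage sketch (added for illustration; the checkpoint id is an assumption):
#
#   from transformers import OwlViTProcessor
#   processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#   # Nested lists give one query set per image; shorter sets are padded with " "
#   # to the batch-wide maximum before tokenization, as implemented in __call__.
#   inputs = processor(text=[["a cat", "a dog"], ["a car"]], images=[img_a, img_b], return_tensors="pt")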
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
if is_torch_available():
import torch
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested list."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
class ASTFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=1, padding_value=0.0, sampling_rate=16000, return_attention_mask=True, do_normalize=True, ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
    def prepare_feat_extract_dict(self):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))
        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = ASTFeatureExtractor
    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)
    def test_call(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))
        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    @require_torch
    def test_double_precision_pad(self):
        import torch
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
@require_torch
def UpperCamelCase__ ( self : Dict ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = torch.tensor(
[-0.98_94, -1.27_76, -0.90_66, -1.27_76, -0.93_49, -1.26_09, -1.03_86, -1.27_76,
-1.15_61, -1.27_76, -1.20_52, -1.27_23, -1.21_90, -1.21_32, -1.27_76, -1.11_33,
-1.19_53, -1.13_43, -1.15_84, -1.22_03, -1.17_70, -1.24_74, -1.23_81, -1.19_36,
-0.92_70, -0.83_17, -0.80_49, -0.77_06, -0.75_65, -0.78_69] )
# fmt: on
__SCREAMING_SNAKE_CASE : Optional[int] = self._load_datasamples(1 )
__SCREAMING_SNAKE_CASE : int = ASTFeatureExtractor()
__SCREAMING_SNAKE_CASE : Optional[Any] = feature_extractor(lowerCAmelCase__ , return_tensors="""pt""" ).input_values
self.assertEquals(input_values.shape , (1, 1_0_2_4, 1_2_8) )
self.assertTrue(torch.allclose(input_values[0, 0, :3_0] , lowerCAmelCase__ , atol=1E-4 ) )
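

# A minimal standalone sketch (editor's addition; it relies only on the public
# behaviour exercised above: a raw waveform in, log-mel "input_values" of shape
# (1, 1024, 128) out):
#
#     import numpy as np
#     from transformers import ASTFeatureExtractor
#
#     extractor = ASTFeatureExtractor()
#     waveform = np.random.randn(16000).astype(np.float32)  # ~1 s of 16 kHz audio
#     features = extractor(waveform, sampling_rate=16000, return_tensors="pt")
#     print(features.input_values.shape)  # torch.Size([1, 1024, 128])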
from __future__ import annotations

import math

__version__ = "2020.9.26"
__author__ = "xcodz-dot, cclaus, dhruvmanila"


def convert_to_2d(x: float, y: float, z: float, scale: float, distance: float) -> tuple[float, float]:
    """Convert a 3D point to a 2D drawable point via perspective projection."""
    if not all(isinstance(val, (float, int)) for val in locals().values()):
        msg = f"Input values must either be float or int: {list(locals().values())}"
        raise TypeError(msg)
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y


def rotate(x: float, y: float, z: float, axis: str, angle: float) -> tuple[float, float, float]:
    """Rotate a 3D point around the given axis ('x', 'y' or 'z') by `angle` degrees."""
    if not isinstance(axis, str):
        raise TypeError("Axis must be a str")
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val, (float, int)) for val in input_variables.values()):
        msg = (
            "Input values except axis must either be float or int: "
            f"{list(input_variables.values())}"
        )
        raise TypeError(msg)
    angle = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle) - y * math.sin(angle)
        new_y = y * math.cos(angle) + x * math.sin(angle)
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + y * math.sin(angle)
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + x * math.sin(angle)
        new_y = y
    else:
        raise ValueError("not a valid axis, choose one of 'x', 'y', 'z'")
    return new_x, new_y, new_z


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{convert_to_2d(1.0, 2.0, 3.0, 10.0, 10.0) = }")
    print(f"{rotate(1.0, 2.0, 3.0, 'y', 90.0) = }")
'''simple docstring'''
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('>=', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
logger = get_logger(__name__)


def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0):
    os.makedirs(output_dir, exist_ok=True)

    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        state_dict = model.state_dict()
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            output_model_file = os.path.join(output_dir, weights_name)
            if accelerator.process_index == 0:
                logger.info(f"Saving model to {output_model_file}")
                torch.save(state_dict, output_model_file)
                logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            output_model_file = os.path.join(output_dir, weights_name)
            logger.info(f"Saving model to {output_model_file}")
            torch.save(state_dict, output_model_file)
            logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = os.path.join(output_dir, f"{MODEL_NAME}_{model_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving model to {ckpt_dir}")
            state_dict = {"model": state_dict}
            dist_cp.save_state_dict(
                state_dict=state_dict,
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Model saved to {ckpt_dir}")
def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0):
    accelerator.wait_for_everyone()

    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if type(model) != FSDP and accelerator.process_index != 0:
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        "Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
                        "initializing FSDP object"
                    )
                return
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = (
                os.path.join(input_dir, f"{MODEL_NAME}_{model_index}")
                if f"{MODEL_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading model from {ckpt_dir}")
            state_dict = {"model": model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=state_dict,
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
                planner=DefaultLoadPlanner(),
            )
            state_dict = state_dict["model"]
            logger.info(f"Model loaded from {ckpt_dir}")
        model.load_state_dict(state_dict)
def save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0):
    os.makedirs(output_dir, exist_ok=True)

    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        optim_state = FSDP.optim_state_dict(model, optimizer)
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if accelerator.process_index == 0:
                optim_state_name = (
                    f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
                )
                output_optimizer_file = os.path.join(output_dir, optim_state_name)
                logger.info(f"Saving Optimizer state to {output_optimizer_file}")
                torch.save(optim_state, output_optimizer_file)
                logger.info(f"Optimizer state saved in {output_optimizer_file}")
        else:
            ckpt_dir = os.path.join(output_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving Optimizer state to {ckpt_dir}")
            dist_cp.save_state_dict(
                state_dict={"optimizer": optim_state},
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Optimizer state saved in {ckpt_dir}")
def load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0):
    accelerator.wait_for_everyone()

    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            optim_state = None
            # below check should work but currently it isn't working (mostly a PyTorch issue),
            # in the meantime disabling it at the cost of excess memory usage
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            optimizer_name = (
                f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
            )
            input_optimizer_file = os.path.join(input_dir, optimizer_name)
            logger.info(f"Loading Optimizer state from {input_optimizer_file}")
            optim_state = torch.load(input_optimizer_file)
            logger.info(f"Optimizer state loaded from {input_optimizer_file}")
        else:
            ckpt_dir = (
                os.path.join(input_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
                if f"{OPTIMIZER_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading Optimizer from {ckpt_dir}")
            optim_state = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict(),
                optimizer_key="optimizer",
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
            )
            optim_state = optim_state["optimizer"]
            logger.info(f"Optimizer loaded from {ckpt_dir}")
        flattened_osd = FSDP.optim_state_dict_to_load(optim_state, model, optimizer)
        optimizer.load_state_dict(flattened_osd)
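

# A minimal wiring sketch (editor's addition; the canonical call sites live inside
# `accelerate.Accelerator.save_state` / `load_state`, so treat this as an assumed,
# simplified flow rather than the official API):
#
#     from accelerate import Accelerator
#
#     accelerator = Accelerator()  # launched under an FSDP config
#     model, optimizer = accelerator.prepare(model, optimizer)
#     fsdp_plugin = accelerator.state.fsdp_plugin
#     save_fsdp_model(fsdp_plugin, accelerator, model, "checkpoint_dir")
#     save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, "checkpoint_dir")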
import warnings

from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor


logger = logging.get_logger(__name__)


class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
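

# Migration sketch (editor's addition): since the shim above only warns and defers to
# the parent class, switching is a rename; the checkpoint below is one published
# MobileViT model, used illustratively:
#
#     from transformers import MobileViTImageProcessor
#
#     image_processor = MobileViTImageProcessor.from_pretrained("apple/mobilevit-small")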
"""simple docstring"""
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : Union[str, Any] ,A_ : list ) -> None:
A = set_counts
A = max(A_ )
A = len(A_ )
A = [1] * num_sets
A = list(range(A_ ) )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : int ,A_ : int ) -> bool:
A = self.get_parent(A_ )
A = self.get_parent(A_ )
if src_parent == dst_parent:
return False
if self.ranks[dst_parent] >= self.ranks[src_parent]:
self.set_counts[dst_parent] += self.set_counts[src_parent]
A = 0
A = dst_parent
if self.ranks[dst_parent] == self.ranks[src_parent]:
self.ranks[dst_parent] += 1
A = self.set_counts[dst_parent]
else:
self.set_counts[src_parent] += self.set_counts[dst_parent]
A = 0
A = src_parent
A = self.set_counts[src_parent]
A = max(self.max_set ,A_ )
return True
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : int ) -> int:
if self.parents[disj_set] == disj_set:
return disj_set
A = self.get_parent(self.parents[disj_set] )
return self.parents[disj_set]
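

if __name__ == "__main__":
    # Small demo (editor's addition): four sets of sizes [1, 2, 3, 4].
    ds = DisjointSet([1, 2, 3, 4])
    ds.merge(0, 1)
    print(ds.max_set)  # 4 -- the singleton of size 4 is still the largest set
    ds.merge(2, 3)
    print(ds.max_set)  # 7
    ds.merge(0, 2)
    print(ds.max_set)  # 10 -- everything merged into one set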
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig", "RemBertOnnxConfig"]
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_rembert"] = ["RemBertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_rembert_fast"] = ["RemBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rembert"] = [
'''REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RemBertForCausalLM''',
'''RemBertForMaskedLM''',
'''RemBertForMultipleChoice''',
'''RemBertForQuestionAnswering''',
'''RemBertForSequenceClassification''',
'''RemBertForTokenClassification''',
'''RemBertLayer''',
'''RemBertModel''',
'''RemBertPreTrainedModel''',
'''load_tf_weights_in_rembert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rembert"] = [
'''TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRemBertForCausalLM''',
'''TFRemBertForMaskedLM''',
'''TFRemBertForMultipleChoice''',
'''TFRemBertForQuestionAnswering''',
'''TFRemBertForSequenceClassification''',
'''TFRemBertForTokenClassification''',
'''TFRemBertLayer''',
'''TFRemBertModel''',
'''TFRemBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
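
# Editor's note on the lazy pattern above: `_LazyModule` defers the framework-specific
# imports until first attribute access, so
#
#     from transformers import RemBertConfig  # cheap -- no torch/tf import happens yet
#     from transformers import RemBertModel   # first access triggers the torch-backed load
#
# and missing optional backends fail at use time rather than at `import transformers`.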
import argparse
import struct
import unittest
class SHA256:
    """Computes the SHA-256 digest of `data`; the result is available as `self.hash`."""

    def __init__(self, data: bytes) -> None:
        self.data = data
# Initialize hash values
        self.hashes = [
0X6a09_e667,
0Xbb67_ae85,
0X3c6e_f372,
0Xa54f_f53a,
0X510e_527f,
0X9b05_688c,
0X1f83_d9ab,
0X5be0_cd19,
]
# Initialize round constants
        self.round_constants = [
0X428a_2f98,
0X7137_4491,
0Xb5c0_fbcf,
0Xe9b5_dba5,
0X3956_c25b,
0X59f1_11f1,
0X923f_82a4,
0Xab1c_5ed5,
0Xd807_aa98,
0X1283_5b01,
0X2431_85be,
0X550c_7dc3,
0X72be_5d74,
0X80de_b1fe,
0X9bdc_06a7,
0Xc19b_f174,
0Xe49b_69c1,
0Xefbe_4786,
0X0fc1_9dc6,
0X240c_a1cc,
0X2de9_2c6f,
0X4a74_84aa,
0X5cb0_a9dc,
0X76f9_88da,
0X983e_5152,
0Xa831_c66d,
0Xb003_27c8,
0Xbf59_7fc7,
0Xc6e0_0bf3,
0Xd5a7_9147,
0X06ca_6351,
0X1429_2967,
0X27b7_0a85,
0X2e1b_2138,
0X4d2c_6dfc,
0X5338_0d13,
0X650a_7354,
0X766a_0abb,
0X81c2_c92e,
0X9272_2c85,
0Xa2bf_e8a1,
0Xa81a_664b,
0Xc24b_8b70,
0Xc76c_51a3,
0Xd192_e819,
0Xd699_0624,
0Xf40e_3585,
0X106a_a070,
0X19a4_c116,
0X1e37_6c08,
0X2748_774c,
0X34b0_bcb5,
0X391c_0cb3,
0X4ed8_aa4a,
0X5b9c_ca4f,
0X682e_6ff3,
0X748f_82ee,
0X78a5_636f,
0X84c8_7814,
0X8cc7_0208,
0X90be_fffa,
0Xa450_6ceb,
0Xbef9_a3f7,
0Xc671_78f2,
]
        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()
    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer
    def final_hash(self) -> None:
        # Convert into blocks of 64 bytes
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]

        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 0-ed integers
            words += [0] * 48

            a, b, c, d, e, f, g, h = self.hashes

            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x100000000

                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x100000000
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x100000000

                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x100000000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x100000000),
                )

            mutated_hash_values = [a, b, c, d, e, f, g, h]

            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x100000000)
                for index, element in enumerate(self.hashes)
            ]

        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])

    def ror(self, value: int, rotations: int) -> int:
        # 32-bit right rotation
        return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)
class SHA256HashTest(unittest.TestCase):
    def test_match_hashes(self) -> None:
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())
def main() -> None:
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument(
        "-f", "--file", dest="input_file", help="Hash contents of a file"
    )
    args = parser.parse_args()

    hash_input = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(hash_input, "utf-8")

    print(SHA256(hash_input).hash)


if __name__ == "__main__":
    main()
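

# Example CLI run (editor's addition; assumes this module is saved as sha256.py).
# The digest shown is the well-known SHA-256 of "hello world", so it matches hashlib:
#
#     $ python sha256.py -s "hello world"
#     b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9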
def naive_pattern_search(s: str, pattern: str) -> list:
    """Return all starting indices where `pattern` occurs in `s`."""
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position


if __name__ == "__main__":
    assert naive_pattern_search("ABCDEFG", "DE") == [3]
    print(naive_pattern_search("ABAAABCDBBABCDDEBCABC", "ABC"))
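
    # Worst-case note (editor's addition): the scan is O(len(s) * len(pattern)),
    # since each of the len(s) - len(pattern) + 1 offsets may re-compare a shared prefix.
    print(naive_pattern_search("AAAAAA", "AAB"))  # [] -- every offset fails on the last char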
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)

MARIAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Helsinki-NLP/opus-mt-en-de": "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json",
    # See all Marian models at https://huggingface.co/models?filter=marian
}
class MarianConfig(PretrainedConfig):
    model_type = "marian"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=58101,
        decoder_vocab_size=None,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=58100,
        scale_embedding=False,
        pad_token_id=58100,
        eos_token_id=0,
        forced_eos_token_id=0,
        share_encoder_decoder_embeddings=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
class MarianOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )

            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs

    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_encoder_and_decoder(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
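

# A hedged export sketch (editor's addition, not part of this module). With the legacy
# `transformers.onnx` export API, this config would drive an ONNX export roughly as:
#
#     from pathlib import Path
#     from transformers import AutoTokenizer, MarianMTModel
#     from transformers.onnx import export
#
#     ckpt = "Helsinki-NLP/opus-mt-en-de"
#     tokenizer = AutoTokenizer.from_pretrained(ckpt)
#     model = MarianMTModel.from_pretrained(ckpt)
#     onnx_config = MarianOnnxConfig(model.config, task="seq2seq-lm")
#     export(tokenizer, model, onnx_config, onnx_config.default_onnx_opset, Path("marian.onnx"))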
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "[CLS]",
            "[SEP]",
            "want",
            "unwanted",
            "wa",
            "un",
            "running",
            ",",
            "low",
            "l",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        kwargs["lower_case"] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "<unk> UNwanted , running"
        output_text = "<unk> unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)

        tokens = tokenizer.tokenize("<unk> UNwanted , running")
        self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])

    def test_full_tokenizer_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["hello", "!", "how", "are", "you", "?"]
        )

    def test_full_tokenizer_no_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_full_tokenizer_moses(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)

        text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        tokens_out = [
            "Hello",
            "(",
            "bracket",
            ")",
            "and",
            "side",
            "@-@",
            "scrolled",
            "[",
            "and",
            "]",
            "Henry",
            "'s",
            "$",
            "5",
            "@,@",
            "000",
            "with",
            "3",
            "@.@",
            "34",
            "m",
            ".",
            "What",
            "'s",
            "up",
            "!",
            "?",
        ]

        self.assertListEqual(tokenizer.tokenize(text_in), tokens_out)
        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in)

    def test_move_added_token(self):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)

        tokenizer.add_tokens(["new1", "new2"])
        tokenizer.move_added_token("new1", 1)

        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer), original_len + 2)
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("new1"), [1])
        self.assertEqual(tokenizer.decode([1]), "new1")
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two(x):
    return x + 2


class PythonInterpreterTester(unittest.TestCase):
    def test_evaluate_assign(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

        code = "x = y"
        state = {"y": 5}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 5, "y": 5})

    def test_evaluate_call(self):
        code = "y = add_two(x)"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code, {}, state=state)
        assert result is None
        assert "tried to execute add_two" in out.out

    def test_evaluate_constant(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

    def test_evaluate_dict(self):
        code = "test_dict = {'x': x, 'y': add_two(x)}"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertDictEqual(result, {"x": 3, "y": 5})
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_expression(self):
        code = "x = 3\ny = 5"
        state = {}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

    def test_evaluate_f_string(self):
        code = "text = f'This is x: {x}.'"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state, {"x": 3, "text": "This is x: 3."})

    def test_evaluate_if(self):
        code = "if x <= 3:\n    y = 2\nelse:\n    y = 5"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state, {"x": 3, "y": 2})

        state = {"x": 8}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 8, "y": 5})

    def test_evaluate_list(self):
        code = "test_list = [x, add_two(x)]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertListEqual(result, [3, 5])
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

    def test_evaluate_name(self):
        code = "y = x"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3, "y": 3})

    def test_evaluate_subscript(self):
        code = "test_list = [x, add_two(x)]\ntest_list[1]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

        code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_for(self):
        code = "x = 0\nfor i in range(3):\n    x = i"
        state = {}
        result = evaluate(code, {"range": range}, state=state)
        assert result == 2
        self.assertDictEqual(state, {"x": 2, "i": 2})
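

# Direct usage sketch (editor's addition, mirroring the assertions above): `evaluate`
# interprets a restricted Python subset against an explicit tool mapping and a mutable
# state dict, returning the value of the last statement:
#
#     state = {"x": 3}
#     evaluate("y = add_two(x)", {"add_two": add_two}, state=state)  # -> 5
#     state  # {'x': 3, 'y': 5}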
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big(n, prec=1000):
    """Probabilistic Miller-Rabin primality test using `prec` random witnesses."""
    if n < 2:
        return False

    if n % 2 == 0:
        return n == 2

    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2  # integer division keeps d an int for bin_exp_mod
        exp += 1
    # n - 1 = d * (2 ** exp)

    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True


if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
    print("Here's the list of primes:")
    print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
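
    # Editor's note: each witness round that fails to certify compositeness cuts the
    # error probability by at least a factor of 4, so with prec rounds a composite n
    # is misclassified with probability at most 4 ** -prec.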
"""simple docstring"""
# using dfs for finding eulerian path traversal
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None ):
"""simple docstring"""
A__ = (path or []) + [u]
for v in graph[u]:
if visited_edge[u][v] is False:
A__ = True, True
A__ = dfs(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return path
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
A__ = 0
A__ = -1
for i in range(SCREAMING_SNAKE_CASE__ ):
if i not in graph.keys():
continue
if len(graph[i] ) % 2 == 1:
odd_degree_nodes += 1
A__ = i
if odd_degree_nodes == 0:
return 1, odd_node
if odd_degree_nodes == 2:
return 2, odd_node
return 3, odd_node
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
A__ = [[False for _ in range(max_node + 1 )] for _ in range(max_node + 1 )]
A__ = check_circuit_or_path(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if check == 3:
print('graph is not Eulerian' )
print('no path' )
return
A__ = 1
if check == 2:
A__ = odd_node
print('graph has a Euler path' )
if check == 1:
print('graph has a Euler cycle' )
A__ = dfs(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
print(SCREAMING_SNAKE_CASE__ )
def UpperCAmelCase ( ):
"""simple docstring"""
A__ = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
A__ = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
A__ = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
A__ = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
A__ = {
1: [],
2: []
# all degree is zero
}
A__ = 10
check_euler(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
check_euler(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
check_euler(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
check_euler(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
check_euler(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
main()
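
# Editor's note: the classification in check_circuit_or_path follows Euler's theorem:
# a connected undirected graph has an Euler circuit iff every vertex has even degree,
# and an Euler path iff exactly two vertices have odd degree (the walk starts at one).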
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformer2DModel
from .resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D
class FlaxCrossAttnDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        output_states = ()

        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states


class FlaxDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, deterministic=True):
        output_states = ()

        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states


class FlaxCrossAttnUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True):
        for resnet, attn in zip(self.resnets, self.attentions):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states


class FlaxUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states


class FlaxUNetMidBlock2DCrossAttn(nn.Module):
    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
        ]

        attentions = []

        for _ in range(self.num_layers):
            attn_block = FlaxTransformer2DModel(
                in_channels=self.in_channels,
                n_heads=self.num_attention_heads,
                d_head=self.in_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

            res_block = FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets
        self.attentions = attentions

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        return hidden_states
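

# A hedged standalone-initialisation sketch (editor's addition; these blocks are
# normally constructed inside diffusers' Flax UNet, and the shapes below are assumed):
#
#     import jax
#
#     block = FlaxDownBlock2D(in_channels=32, out_channels=32)
#     rng = jax.random.PRNGKey(0)
#     sample = jnp.zeros((1, 8, 8, 32))  # NHWC layout, as Flax convolutions expect
#     temb = jnp.zeros((1, 128))
#     params = block.init(rng, sample, temb)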
'''simple docstring'''
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        d_model=16,
        decoder_seq_length=7,
        is_training=True,
        is_decoder=True,
        use_attention_mask=True,
        use_cache=False,
        use_labels=True,
        decoder_start_token_id=2,
        decoder_ffn_dim=32,
        decoder_layers=4,
        decoder_attention_heads=4,
        max_position_embeddings=30,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels

        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.decoder_layers = decoder_layers
        self.num_hidden_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings

        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        config = TrOCRConfig(
            vocab_size=self.vocab_size,
            d_model=self.d_model,
            decoder_layers=self.decoder_layers,
            decoder_ffn_dim=self.decoder_ffn_dim,
            decoder_attention_heads=self.decoder_attention_heads,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            use_cache=self.use_cache,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
            max_position_embeddings=self.max_position_embeddings,
        )

        return (config, input_ids, attention_mask, lm_labels)

    def create_and_check_decoder_model_past(self, config, input_ids, attention_mask, lm_labels):
        config.use_cache = True
        model = TrOCRDecoder(config=config).to(torch_device).eval()
        input_ids = input_ids[:2]

        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        past_key_values = outputs["past_key_values"]

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False

    def setUp(self):
        self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=TrOCRConfig)

    # not implemented currently
    def test_inputs_embeds(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_from_base(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_to_base(self):
        pass

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)

    # decoder cannot keep gradients
    def test_retain_grad_hidden_states_attentions(self):
        return

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase :Any = logging.get_logger(__name__)
lowerCamelCase :List[Any] = {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/config.json''',
'''umberto-commoncrawl-cased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'''
),
'''umberto-wikipedia-uncased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'''
),
}
class CamembertConfig(PretrainedConfig):
    model_type = "camembert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
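
# --- Usage sketch (added, illustrative) --------------------------------------
# How the two classes above fit together: build a deliberately tiny config and
# inspect the dynamic axes an ONNX export would use. The sizes are arbitrary.
if __name__ == "__main__":
    cfg = CamembertConfig(vocab_size=1000, hidden_size=64, num_hidden_layers=2, num_attention_heads=2)
    onnx_cfg = CamembertOnnxConfig(cfg)
    print(onnx_cfg.inputs)  # OrderedDict: input name -> {axis index: axis name}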
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('''>=''', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
logger = get_logger(__name__)
def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        state_dict = model.state_dict()
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            output_model_file = os.path.join(output_dir, weights_name)
            if accelerator.process_index == 0:
                logger.info(f"Saving model to {output_model_file}")
                torch.save(state_dict, output_model_file)
                logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            output_model_file = os.path.join(output_dir, weights_name)
            logger.info(f"Saving model to {output_model_file}")
            torch.save(state_dict, output_model_file)
            logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = os.path.join(output_dir, f"{MODEL_NAME}_{model_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving model to {ckpt_dir}")
            state_dict = {"model": state_dict}
            dist_cp.save_state_dict(
                state_dict=state_dict,
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Model saved to {ckpt_dir}")
def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if type(model) != FSDP and accelerator.process_index != 0:
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        "Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
                        "initializing FSDP object"
                    )
                return
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = (
                os.path.join(input_dir, f"{MODEL_NAME}_{model_index}")
                if f"{MODEL_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading model from {ckpt_dir}")
            state_dict = {"model": model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=state_dict,
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
                planner=DefaultLoadPlanner(),
            )
            state_dict = state_dict["model"]
            logger.info(f"Model loaded from {ckpt_dir}")
        model.load_state_dict(state_dict)
def save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        optim_state = FSDP.optim_state_dict(model, optimizer)
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if accelerator.process_index == 0:
                optim_state_name = (
                    f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
                )
                output_optimizer_file = os.path.join(output_dir, optim_state_name)
                logger.info(f"Saving Optimizer state to {output_optimizer_file}")
                torch.save(optim_state, output_optimizer_file)
                logger.info(f"Optimizer state saved in {output_optimizer_file}")
        else:
            ckpt_dir = os.path.join(output_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving Optimizer state to {ckpt_dir}")
            dist_cp.save_state_dict(
                state_dict={"optimizer": optim_state},
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Optimizer state saved in {ckpt_dir}")
def load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            optim_state = None
            # below check should work but currently it isn't working (mostly pytorch issue),
            # in the meantime disabling it at the cost of excess memory usage
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            optimizer_name = (
                f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
            )
            input_optimizer_file = os.path.join(input_dir, optimizer_name)
            logger.info(f"Loading Optimizer state from {input_optimizer_file}")
            optim_state = torch.load(input_optimizer_file)
            logger.info(f"Optimizer state loaded from {input_optimizer_file}")
        else:
            ckpt_dir = (
                os.path.join(input_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
                if f"{OPTIMIZER_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading Optimizer from {ckpt_dir}")
            optim_state = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict(),
                optimizer_key="optimizer",
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
            )
            optim_state = optim_state["optimizer"]
            logger.info(f"Optimizer loaded from {ckpt_dir}")
        flattened_osd = FSDP.optim_state_dict_to_load(optim_state, model, optimizer)
        optimizer.load_state_dict(flattened_osd)
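
# --- Usage sketch (added) -----------------------------------------------------
# A hypothetical checkpoint round-trip inside an `accelerate` training script;
# `accelerator`, `model` and `optimizer` are assumed to come from
# `Accelerator().prepare(...)` with an FSDP plugin configured.
def example_checkpoint_roundtrip(accelerator, model, optimizer, ckpt_dir="ckpt"):
    plugin = accelerator.state.fsdp_plugin
    save_fsdp_model(plugin, accelerator, model, ckpt_dir)
    save_fsdp_optimizer(plugin, accelerator, optimizer, model, ckpt_dir)
    load_fsdp_model(plugin, accelerator, model, ckpt_dir)
    load_fsdp_optimizer(plugin, accelerator, optimizer, model, ckpt_dir)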
import math


def res(x, y):
    if 0 not in (x, y):
        # We use the relation x^y = y*log10(x), where 10 is the base.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError("This should never happen")
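
# Quick sanity check of the log trick (added, illustrative): 10*log10(2) ~= 3.01
# corresponds to 2**10 = 1024, which indeed beats 10**3 = 1000.
assert res(2, 10) > res(10, 3)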
if __name__ == "__main__":  # Main function
    # Read two numbers from input and typecast them to int using map function.
    # Here x is the base and y is the power.
    prompt = "Enter the base and the power separated by a comma: "
    x1, y1 = map(int, input(prompt).split(","))
    x2, y2 = map(int, input(prompt).split(","))

    # We find the log of each number, using the function res(), which takes two
    # arguments.
    res1 = res(x1, y1)
    res2 = res(x2, y2)

    # We check for the largest number
    if res1 > res2:
        print("Largest number is", x1, "^", y1)
    elif res2 > res1:
        print("Largest number is", x2, "^", y2)
    else:
        print("Both are equal")
def kinetic_energy(mass: float, velocity: float) -> float:
    """
    Calculate the kinetic energy of an object: KE = 0.5 * m * v^2.

    >>> kinetic_energy(10, 10)
    500.0
    >>> kinetic_energy(2, -3)
    9.0
    """
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
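    # Quick check (added, illustrative): only speed matters, not direction.
    assert kinetic_energy(2, -3) == kinetic_energy(2, 3) == 9.0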
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=24, num_hidden_layers=2, num_attention_heads=6, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, scope=None, range_bbox=1000):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    def get_config(self):
        return LiltConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_token_classification(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False

    def is_pipeline_test_to_skip(self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, processor_name):
        return True

    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)

        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)

        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]],
            device=torch_device,
        )

        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
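
# --- Added note ---------------------------------------------------------------
# The per-element bbox "legalization" loop in LiltModelTester can also be done
# vectorized; a sketch (hypothetical helper, assumes tensors of shape [..., 4]
# laid out as x0, y0, x1, y1):
def legalize_bboxes(bbox):
    x0 = torch.minimum(bbox[..., 0], bbox[..., 2])
    x1 = torch.maximum(bbox[..., 0], bbox[..., 2])
    y0 = torch.minimum(bbox[..., 1], bbox[..., 3])
    y1 = torch.maximum(bbox[..., 1], bbox[..., 3])
    return torch.stack([x0, y0, x1, y1], dim=-1)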
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2048,
}
class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, **kwargs):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Builds the input ids for a conversation by joining turns with the EOS token."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
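
if __name__ == "__main__":
    # Usage sketch (added, illustrative; downloads the tokenizer from the Hub).
    tok = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
    print(tok("hello world").input_ids)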
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class MusicgenProcessor(ProcessorMixin):
    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)
    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if text is not None:
            inputs = self.tokenizer(text, **kwargs)
        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)

        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs
    def batch_decode(self, *args, **kwargs):
        """Decodes audio outputs when `audio_values` are given, otherwise forwards to the tokenizer."""
        audio_values = kwargs.pop("audio", None)
        padding_mask = kwargs.pop("padding_mask", None)

        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]

        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)
    def _decode_audio(self, audio_values, padding_mask=None) -> List[np.ndarray]:
        """Strips any padding from the generated audio values, returning a list of numpy arrays."""
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape

        if padding_mask is None:
            return list(audio_values)

        padding_mask = to_numpy(padding_mask)

        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)

        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1)

        return audio_values
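
# --- Numeric illustration (added; not part of the processor) -------------------
# The padding trick in `_decode_audio`: the mask is extended with the
# *non*-padding value so newly generated samples are kept, then samples at
# padding positions are dropped from every channel.
if __name__ == "__main__":
    mask = np.array([[1, 1, 0]])  # 1 = real input, 0 = padding (padding_value = 0)
    audio = np.arange(10).reshape(1, 2, 5)  # (batch, channels, seq_len), 2 extra generated steps
    diff = audio.shape[-1] - mask.shape[-1]
    mask = np.pad(mask, ((0, 0), (0, diff)), "constant", constant_values=1 - 0)
    kept = np.asarray(audio[0])[mask[0][None, :] != 0].reshape(2, -1)
    assert kept.shape == (2, 4)  # the padded third sample is removed from each channel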
"""Project Euler 63: count n-digit positive integers that are also nth powers."""


def solution(max_base: int = 10, max_power: int = 22) -> int:
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power
    )
if __name__ == "__main__":
print(f"""{solution(10, 22) = }""")
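    # Worked example (added): 9**5 = 59049 is itself a 5-digit fifth power, so
    # it is counted, while 10**n always has n + 1 digits, capping the base at 9.
    assert len(str(9**5)) == 5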
"""The SuperGLUE benchmark metric."""
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_CITATION = '''\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
'''
_DESCRIPTION = '''\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
'''
_KWARGS_DESCRIPTION = '''
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for \'record\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'prediction_text\': the predicted answer text
- for \'multirc\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question-answer pair as specified by the dataset
- \'prediction\': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for \'record\': list of question-answers dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'answers\': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for \'record\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1\': F1 score
- for \'multirc\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1_m\': Per-question macro-F1 score
- \'f1_a\': Average F1 score over all answers
- for \'axb\':
\'matthews_correlation\': Matthew Correlation
- for \'cb\':
- \'accuracy\': Accuracy
- \'f1\': F1 score
- for all others:
- \'accuracy\': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')
>>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]
>>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')
>>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels, f1_avg="binary"):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg))
    return {
        "accuracy": acc,
        "f1": f1,
    }
def evaluate_multirc(ids_preds, labels):
    """
    Computes F1 over all answers ("f1_a"), exact match over each question's set
    of answers ("exact_match"), and a per-question macro-F1 ("f1_m").
    """
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f"{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]

    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        f1s.append(f1)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class SuperGlue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if not self.config_name == '''record''' and not self.config_name == '''multirc''' else None , )
    def _get_feature_types(self):
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"prediction_text": datasets.Value('''string''' ),
},
"references": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"answers": datasets.Sequence(datasets.Value('''string''' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('''int64''' ),
"paragraph": datasets.Value('''int64''' ),
"question": datasets.Value('''int64''' ),
},
"prediction": datasets.Value('''int64''' ),
},
"references": datasets.Value('''int64''' ),
}
else:
return {
"predictions": datasets.Value('''int64''' ),
"references": datasets.Value('''int64''' ),
}
    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
    },
    "monolingual_vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1024}
class BartphoTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, monolingual_vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file, "r", encoding="utf-8") as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(mask_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self):
        return len(self.fairseq_ids_to_tokens)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.fairseq_ids_to_tokens[index]

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) into a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_monolingual_vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"],
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file
        ) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, "w", encoding="utf-8") as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f"{str(token)} \n")

        return out_vocab_file, out_monolingual_vocab_file
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class GLPNImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size_divisor=32, do_rescale=True):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size_divisor": self.size_divisor,
"do_rescale": self.do_rescale,
}
@require_torch
@require_vision
class GLPNImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = GLPNImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = GLPNImageProcessingTester(self)
@property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))
        self.assertTrue(hasattr(image_processing, "resample"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
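
# --- Added illustration ---------------------------------------------------------
# The assertions above check that GLPN snaps each spatial dimension down to a
# multiple of `size_divisor`. A hypothetical helper (not part of the API):
def _round_down_to_multiple(x: int, divisor: int = 32) -> int:
    return (x // divisor) * divisor


assert _round_down_to_multiple(67) == 64  # e.g. a 67-px side becomes 64 px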
'''simple docstring'''
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeq2SeqDataset, Seq2SeqDataset
BERT_BASE_CASED = "bert-base-cased"
PEGASUS_XSUM = "google/pegasus-xsum"
ARTICLES = [" Sam ate lunch today.", "Sams lunch ingredients."]
SUMMARIES = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"]
T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"
MARIAN_TINY = "sshleifer/tiny-marian-en-de"
def _dump_articles(path: Path, articles: list):
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)
def make_test_data_dir(tmp_dir):
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir, f"{split}.source"), ARTICLES)
        _dump_articles(os.path.join(tmp_dir, f"{split}.target"), SUMMARIES)
    return tmp_dir
class TestAll(TestCasePlus):
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
    def test_seq2seq_dataset_truncation(self, tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        max_src_len = 4
        max_tgt_len = 8
        assert max_len_target > max_src_len  # Will be truncated
        assert max_len_source > max_src_len  # Will be truncated
        src_lang, tgt_lang = "ro_RO", "de_DE"  # ignored for all but mbart, but never causes error.
        train_dataset = Seq2SeqDataset(
            tokenizer,
            data_dir=tmp_dir,
            type_path="train",
            max_source_length=max_src_len,
            max_target_length=max_tgt_len,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
        )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert isinstance(batch, dict)
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_src_len
            # show that targets are the same len
            assert batch["labels"].shape[1] == max_tgt_len
            if tok_name != MBART_TINY:
                continue
            # check language codes in correct place
            batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], tokenizer.pad_token_id)
            assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
            assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
            break  # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
    def test_legacy_dataset_truncation(self, tok):
        tokenizer = AutoTokenizer.from_pretrained(tok)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        trunc_target = 4
        train_dataset = LegacySeq2SeqDataset(
            tokenizer,
            data_dir=tmp_dir,
            type_path="train",
            max_source_length=20,
            max_target_length=trunc_target,
        )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_len_source
            assert 20 >= batch["input_ids"].shape[1]  # trimmed significantly
            # show that targets were truncated
            assert batch["labels"].shape[1] == trunc_target  # Truncated
            assert max_len_target > trunc_target  # Truncated
            break  # No need to test every batch
    def test_pack_dataset(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25")

        tmp_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        orig_examples = tmp_dir.joinpath("train.source").open().readlines()
        save_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        pack_data_dir(tokenizer, tmp_dir, 128, save_dir)
        orig_paths = {x.name for x in tmp_dir.iterdir()}
        new_paths = {x.name for x in save_dir.iterdir()}
        packed_examples = save_dir.joinpath("train.source").open().readlines()
        # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
        # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
        assert len(packed_examples) < len(orig_examples)
        assert len(packed_examples) == 1
        assert len(packed_examples[0]) == sum(len(x) for x in orig_examples)
        assert orig_paths == new_paths
    @pytest.mark.skipif(not FAIRSEQ_AVAILABLE, reason="This test requires fairseq")
    def test_dynamic_batch_size(self):
        if not FAIRSEQ_AVAILABLE:
            return
        ds, max_tokens, tokenizer = self._get_dataset(max_len=64)
        required_batch_size_multiple = 64
        batch_sampler = ds.make_dynamic_sampler(max_tokens, required_batch_size_multiple=required_batch_size_multiple)
        batch_sizes = [len(x) for x in batch_sampler]
        assert len(set(batch_sizes)) > 1  # it's not dynamic batch size if every batch is the same length
        assert sum(batch_sizes) == len(ds)  # no dropped or added examples
        data_loader = DataLoader(ds, batch_sampler=batch_sampler, collate_fn=ds.collate_fn, num_workers=2)
        failures = []
        num_src_per_batch = []
        for batch in data_loader:
            src_shape = batch["input_ids"].shape
            bs = src_shape[0]
            assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
            num_src_tokens = np.product(batch["input_ids"].shape)
            num_src_per_batch.append(num_src_tokens)
            if num_src_tokens > (max_tokens * 1.1):
                failures.append(num_src_tokens)
        assert num_src_per_batch[0] == max(num_src_per_batch)
        if failures:
            raise AssertionError(f"too many tokens in {len(failures)} batches")
    def test_sortish_sampler_reduces_padding(self):
        ds, _, tokenizer = self._get_dataset(max_len=512)
        bs = 2
        sortish_sampler = ds.make_sortish_sampler(bs, shuffle=False)

        naive_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2)
        sortish_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2, sampler=sortish_sampler)

        pad = tokenizer.pad_token_id

        def count_pad_tokens(data_loader, k="input_ids"):
            return [batch[k].eq(pad).sum().item() for batch in data_loader]

        assert sum(count_pad_tokens(sortish_dl, k="labels")) < sum(count_pad_tokens(naive_dl, k="labels"))
        assert sum(count_pad_tokens(sortish_dl)) < sum(count_pad_tokens(naive_dl))
        assert len(sortish_dl) == len(naive_dl)
    def _get_dataset(self, n_obs=1000, max_len=128):
        if os.getenv("USE_REAL_DATA", False):
            data_dir = "examples/seq2seq/wmt_en_ro"
            max_tokens = max_len * 2 * 64
            if not Path(data_dir).joinpath("train.len").exists():
                save_len_file(MARIAN_TINY, data_dir)
        else:
            data_dir = "examples/seq2seq/test_data/wmt_en_ro"
            max_tokens = max_len * 4
            save_len_file(MARIAN_TINY, data_dir)

        tokenizer = AutoTokenizer.from_pretrained(MARIAN_TINY)
        ds = Seq2SeqDataset(
            tokenizer,
            data_dir=data_dir,
            type_path="train",
            max_source_length=max_len,
            max_target_length=max_len,
            n_obs=n_obs,
        )
        return ds, max_tokens, tokenizer
    def test_distributed_sortish_sampler_splits_indices_between_procs(self):
        ds, max_tokens, tokenizer = self._get_dataset()
        ids1 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=0, add_extra_examples=False))
        ids2 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=1, add_extra_examples=False))
        assert ids1.intersection(ids2) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
    def test_dataset_kwargs(self, tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name, use_fast=False)
        if tok_name == MBART_TINY:
            train_dataset = Seq2SeqDataset(
                tokenizer,
                data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()),
                type_path="train",
                max_source_length=4,
                max_target_length=8,
                src_lang="EN",
                tgt_lang="FR",
            )
            kwargs = train_dataset.dataset_kwargs
            assert "src_lang" in kwargs and "tgt_lang" in kwargs
        else:
            train_dataset = Seq2SeqDataset(
                tokenizer,
                data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()),
                type_path="train",
                max_source_length=4,
                max_target_length=8,
            )
            kwargs = train_dataset.dataset_kwargs
            assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
            assert len(kwargs) == 1 if tok_name == BART_TINY else len(kwargs) == 0
"""Circle sort: repeatedly compare and swap elements lying opposite each other
across the list, recursing into both halves, until a full pass makes no swap."""


def circle_sort(collection: list) -> list:
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection: list, low: int, high: int) -> bool:
        """Return True if at least one swap was made in this pass."""
        swapped = False

        if low == high:
            return swapped

        left = low
        right = high

        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True

            left += 1
            right -= 1

        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True

        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)

        return swapped or left_swap or right_swap

    is_not_sorted = True

    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)

    return collection
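
# Deterministic check (added, illustrative): circle sort agrees with sorted().
assert circle_sort([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]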
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(circle_sort(unsorted))
"""simple docstring"""
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
logger = getLogger(__name__)

DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
def generate_summaries_or_translations(examples, out_file, model_name, batch_size=8, device=DEFAULT_DEVICE, fp16=False, task="summarization", prefix=None, **generate_kwargs):
    fout = Path(out_file).open("w", encoding="utf-8")
    model_name = str(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device)
        summaries = model.generate(
            input_ids=batch.input_ids,
            attention_mask=batch.attention_mask,
            **generate_kwargs,
        )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + "\n")
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}
def datetime_now():
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
def run_generate(verbose=True):
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("input_path", type=str, help="like cnn_dm/test.source")
    parser.add_argument("save_path", type=str, help="where to save summaries")
    parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target")
    parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics")
    parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.")
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the beginning of src examples"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results")
    parser.add_argument(
        "--info",
        nargs="?",
        type=str,
        const=datetime_now(),
        help=(
            "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
            " lang=en-ru. If no value is passed, the current datetime string will be used."
        ),
    )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"parsed the following generate kwargs: {parsed_args}")
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)
    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.")
    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("Can't mix --fp16 and --device cpu")
    runtime_metrics = generate_summaries_or_translations(
        examples,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fp16=args.fp16,
        task=args.task,
        prefix=args.prefix,
        **parsed_args,
    )
    if args.reference_path is None:
        return {}

    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores: dict = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)
    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info
    if verbose:
        print(scores)
    if args.score_path is not None:
        json.dump(scores, open(args.score_path, "w"))
    return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
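# Usage for summarization (an illustrative invocation; the paths below are placeholders,
# only the flags come from the argument parser above):
# python run_eval.py facebook/bart-large-cnn $DATA_DIR/test.source $save_dir/test_generations.txt \
#     --reference_path $DATA_DIR/test.target --score_path $save_dir/rouge.json --task summarization --bs 16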
| 173
|
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    '''simple docstring'''
    files = {}
    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]
    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}
    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"), batched=True, )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]), truncation=True, max_length=max_seq_length, padding="max_length", ), batched=True, )
    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)
    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)
    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)
    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train, ({k: tf.int32 for k in input_names}, tf.int64), ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])), )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )
    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))
    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val, ({k: tf.int32 for k in input_names}, tf.int64), ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])), )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )
    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))
    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test, ({k: tf.int32 for k in input_names}, tf.int64), ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])), )
        if datasets.Split.TEST in transformed_ds
        else None
    )
    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))
    return train_ds, val_ds, test_ds, label2id
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: str = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"})
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
def main():
    '''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome.")
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, )
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        f"16-bits training: {training_args.fp16}")
    logger.info(f"Training/evaluation parameters {training_args}")
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file, eval_file=data_args.dev_file, test_file=data_args.test_file, tokenizer=tokenizer, label_column_id=data_args.label_column_id, max_seq_length=data_args.max_seq_length, )
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=len(label2id), label2id=label2id, id2label={id: label for label, id in label2id.items()}, finetuning_task="text-classification", cache_dir=model_args.cache_dir, )
    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path, from_pt=bool(".bin" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, )
    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}
    # Initialize our Trainer
    trainer = TFTrainer(
        model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_metrics, )
    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)
    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f"  {key} = {value}")
                writer.write(f"{key} = {value}\n")
        results.update(result)
    return results
if __name__ == "__main__":
main()
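# Sketch of an invocation (hypothetical file names and hyperparameters, not taken from this
# file; the script expects CSV files whose label column is selected via --label_column_id):
# python run_tf_text_classification.py --train_file train.csv --dev_file dev.csv --test_file test.csv \
#     --label_column_id 0 --model_name_or_path bert-base-uncased --output_dir ./out \
#     --do_train --do_eval --num_train_epochs 3 --per_device_train_batch_size 16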
| 135
| 0
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
LANGUAGE_CODES = {
"Acehnese Arabic": "ace_Arab",
"Acehnese Latin": "ace_Latn",
"Mesopotamian Arabic": "acm_Arab",
"Ta'izzi-Adeni Arabic": "acq_Arab",
"Tunisian Arabic": "aeb_Arab",
"Afrikaans": "afr_Latn",
"South Levantine Arabic": "ajp_Arab",
"Akan": "aka_Latn",
"Amharic": "amh_Ethi",
"North Levantine Arabic": "apc_Arab",
"Modern Standard Arabic": "arb_Arab",
"Modern Standard Arabic Romanized": "arb_Latn",
"Najdi Arabic": "ars_Arab",
"Moroccan Arabic": "ary_Arab",
"Egyptian Arabic": "arz_Arab",
"Assamese": "asm_Beng",
"Asturian": "ast_Latn",
"Awadhi": "awa_Deva",
"Central Aymara": "ayr_Latn",
"South Azerbaijani": "azb_Arab",
"North Azerbaijani": "azj_Latn",
"Bashkir": "bak_Cyrl",
"Bambara": "bam_Latn",
"Balinese": "ban_Latn",
"Belarusian": "bel_Cyrl",
"Bemba": "bem_Latn",
"Bengali": "ben_Beng",
"Bhojpuri": "bho_Deva",
"Banjar Arabic": "bjn_Arab",
"Banjar Latin": "bjn_Latn",
"Standard Tibetan": "bod_Tibt",
"Bosnian": "bos_Latn",
"Buginese": "bug_Latn",
"Bulgarian": "bul_Cyrl",
"Catalan": "cat_Latn",
"Cebuano": "ceb_Latn",
"Czech": "ces_Latn",
"Chokwe": "cjk_Latn",
"Central Kurdish": "ckb_Arab",
"Crimean Tatar": "crh_Latn",
"Welsh": "cym_Latn",
"Danish": "dan_Latn",
"German": "deu_Latn",
"Southwestern Dinka": "dik_Latn",
"Dyula": "dyu_Latn",
"Dzongkha": "dzo_Tibt",
"Greek": "ell_Grek",
"English": "eng_Latn",
"Esperanto": "epo_Latn",
"Estonian": "est_Latn",
"Basque": "eus_Latn",
"Ewe": "ewe_Latn",
"Faroese": "fao_Latn",
"Fijian": "fij_Latn",
"Finnish": "fin_Latn",
"Fon": "fon_Latn",
"French": "fra_Latn",
"Friulian": "fur_Latn",
"Nigerian Fulfulde": "fuv_Latn",
"Scottish Gaelic": "gla_Latn",
"Irish": "gle_Latn",
"Galician": "glg_Latn",
"Guarani": "grn_Latn",
"Gujarati": "guj_Gujr",
"Haitian Creole": "hat_Latn",
"Hausa": "hau_Latn",
"Hebrew": "heb_Hebr",
"Hindi": "hin_Deva",
"Chhattisgarhi": "hne_Deva",
"Croatian": "hrv_Latn",
"Hungarian": "hun_Latn",
"Armenian": "hye_Armn",
"Igbo": "ibo_Latn",
"Ilocano": "ilo_Latn",
"Indonesian": "ind_Latn",
"Icelandic": "isl_Latn",
"Italian": "ita_Latn",
"Javanese": "jav_Latn",
"Japanese": "jpn_Jpan",
"Kabyle": "kab_Latn",
"Jingpho": "kac_Latn",
"Kamba": "kam_Latn",
"Kannada": "kan_Knda",
"Kashmiri Arabic": "kas_Arab",
"Kashmiri Devanagari": "kas_Deva",
"Georgian": "kat_Geor",
"Central Kanuri Arabic": "knc_Arab",
"Central Kanuri Latin": "knc_Latn",
"Kazakh": "kaz_Cyrl",
"Kabiyè": "kbp_Latn",
"Kabuverdianu": "kea_Latn",
"Khmer": "khm_Khmr",
"Kikuyu": "kik_Latn",
"Kinyarwanda": "kin_Latn",
"Kyrgyz": "kir_Cyrl",
"Kimbundu": "kmb_Latn",
"Northern Kurdish": "kmr_Latn",
"Kikongo": "kon_Latn",
"Korean": "kor_Hang",
"Lao": "lao_Laoo",
"Ligurian": "lij_Latn",
"Limburgish": "lim_Latn",
"Lingala": "lin_Latn",
"Lithuanian": "lit_Latn",
"Lombard": "lmo_Latn",
"Latgalian": "ltg_Latn",
"Luxembourgish": "ltz_Latn",
"Luba-Kasai": "lua_Latn",
"Ganda": "lug_Latn",
"Luo": "luo_Latn",
"Mizo": "lus_Latn",
"Standard Latvian": "lvs_Latn",
"Magahi": "mag_Deva",
"Maithili": "mai_Deva",
"Malayalam": "mal_Mlym",
"Marathi": "mar_Deva",
"Minangkabau Arabic ": "min_Arab",
"Minangkabau Latin": "min_Latn",
"Macedonian": "mkd_Cyrl",
"Plateau Malagasy": "plt_Latn",
"Maltese": "mlt_Latn",
"Meitei Bengali": "mni_Beng",
"Halh Mongolian": "khk_Cyrl",
"Mossi": "mos_Latn",
"Maori": "mri_Latn",
"Burmese": "mya_Mymr",
"Dutch": "nld_Latn",
"Norwegian Nynorsk": "nno_Latn",
"Norwegian Bokmål": "nob_Latn",
"Nepali": "npi_Deva",
"Northern Sotho": "nso_Latn",
"Nuer": "nus_Latn",
"Nyanja": "nya_Latn",
"Occitan": "oci_Latn",
"West Central Oromo": "gaz_Latn",
"Odia": "ory_Orya",
"Pangasinan": "pag_Latn",
"Eastern Panjabi": "pan_Guru",
"Papiamento": "pap_Latn",
"Western Persian": "pes_Arab",
"Polish": "pol_Latn",
"Portuguese": "por_Latn",
"Dari": "prs_Arab",
"Southern Pashto": "pbt_Arab",
"Ayacucho Quechua": "quy_Latn",
"Romanian": "ron_Latn",
"Rundi": "run_Latn",
"Russian": "rus_Cyrl",
"Sango": "sag_Latn",
"Sanskrit": "san_Deva",
"Santali": "sat_Olck",
"Sicilian": "scn_Latn",
"Shan": "shn_Mymr",
"Sinhala": "sin_Sinh",
"Slovak": "slk_Latn",
"Slovenian": "slv_Latn",
"Samoan": "smo_Latn",
"Shona": "sna_Latn",
"Sindhi": "snd_Arab",
"Somali": "som_Latn",
"Southern Sotho": "sot_Latn",
"Spanish": "spa_Latn",
"Tosk Albanian": "als_Latn",
"Sardinian": "srd_Latn",
"Serbian": "srp_Cyrl",
"Swati": "ssw_Latn",
"Sundanese": "sun_Latn",
"Swedish": "swe_Latn",
"Swahili": "swh_Latn",
"Silesian": "szl_Latn",
"Tamil": "tam_Taml",
"Tatar": "tat_Cyrl",
"Telugu": "tel_Telu",
"Tajik": "tgk_Cyrl",
"Tagalog": "tgl_Latn",
"Thai": "tha_Thai",
"Tigrinya": "tir_Ethi",
"Tamasheq Latin": "taq_Latn",
"Tamasheq Tifinagh": "taq_Tfng",
"Tok Pisin": "tpi_Latn",
"Tswana": "tsn_Latn",
"Tsonga": "tso_Latn",
"Turkmen": "tuk_Latn",
"Tumbuka": "tum_Latn",
"Turkish": "tur_Latn",
"Twi": "twi_Latn",
"Central Atlas Tamazight": "tzm_Tfng",
"Uyghur": "uig_Arab",
"Ukrainian": "ukr_Cyrl",
"Umbundu": "umb_Latn",
"Urdu": "urd_Arab",
"Northern Uzbek": "uzn_Latn",
"Venetian": "vec_Latn",
"Vietnamese": "vie_Latn",
"Waray": "war_Latn",
"Wolof": "wol_Latn",
"Xhosa": "xho_Latn",
"Eastern Yiddish": "ydd_Hebr",
"Yoruba": "yor_Latn",
"Yue Chinese": "yue_Hant",
"Chinese Simplified": "zho_Hans",
"Chinese Traditional": "zho_Hant",
"Standard Malay": "zsm_Latn",
"Zulu": "zul_Latn",
}
class TranslationTool(PipelineTool):
    default_checkpoint = "facebook/nllb-200-distilled-600M"
    description = (
        "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
        "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
        "which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in "
        "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = "translator"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    lang_to_code = LANGUAGE_CODES
    inputs = ["text", "text", "text"]
    outputs = ["text"]
    def encode(self, text, src_lang, tgt_lang):
        if src_lang not in self.lang_to_code:
            raise ValueError(f"""{src_lang} is not a supported language.""")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f"""{tgt_lang} is not a supported language.""")
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors='''pt''', src_lang=src_lang, tgt_lang=tgt_lang)
    def forward(self, inputs):
        return self.model.generate(**inputs)
    def decode(self, outputs):
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
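# Minimal usage sketch (assumes the surrounding transformers agents/tools machinery; the
# exact loading entry point may differ between library versions):
# translator = TranslationTool()
# print(translator("C'est super", src_lang="French", tgt_lang="English"))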
| 237
|
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase =logging.get_logger(__name__)
__UpperCAmelCase ={
"microsoft/unispeech-sat-base-100h-libri-ft": (
"https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json"
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class UniSpeechSatConfig(PretrainedConfig):
    model_type = "unispeech-sat"
def __init__( self : Dict , a : str=32 , a : Any=7_68 , a : Optional[Any]=12 , a : Optional[int]=12 , a : int=30_72 , a : int="gelu" , a : Dict=0.1 , a : Dict=0.1 , a : List[Any]=0.1 , a : Tuple=0.0 , a : Optional[Any]=0.0 , a : Tuple=0.1 , a : List[Any]=0.1 , a : str=0.02 , a : List[Any]=1e-5 , a : int="group" , a : Union[str, Any]="gelu" , a : Optional[int]=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , a : List[Any]=(5, 2, 2, 2, 2, 2, 2) , a : int=(10, 3, 3, 3, 3, 2, 2) , a : Optional[Any]=False , a : Any=1_28 , a : Tuple=16 , a : str=False , a : Optional[Any]=True , a : Dict=0.05 , a : List[Any]=10 , a : Any=2 , a : Optional[Any]=0.0 , a : Optional[Any]=10 , a : Any=0 , a : Any=3_20 , a : str=2 , a : List[str]=0.1 , a : List[str]=1_00 , a : List[str]=2_56 , a : str=2_56 , a : Dict=0.1 , a : Optional[Any]="mean" , a : str=False , a : Tuple=False , a : Optional[Any]=2_56 , a : int=(5_12, 5_12, 5_12, 5_12, 15_00) , a : int=(5, 3, 3, 1, 1) , a : Any=(1, 2, 3, 1, 1) , a : Union[str, Any]=5_12 , a : Optional[int]=0 , a : Optional[int]=1 , a : Optional[int]=2 , a : int=5_04 , **a : Dict , ):
"""simple docstring"""
super().__init__(**a , pad_token_id=a , bos_token_id=a , eos_token_id=a )
__lowerCamelCase = hidden_size
__lowerCamelCase = feat_extract_norm
__lowerCamelCase = feat_extract_activation
__lowerCamelCase = list(a )
__lowerCamelCase = list(a )
__lowerCamelCase = list(a )
__lowerCamelCase = conv_bias
__lowerCamelCase = num_conv_pos_embeddings
__lowerCamelCase = num_conv_pos_embedding_groups
__lowerCamelCase = len(self.conv_dim )
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = num_attention_heads
__lowerCamelCase = hidden_dropout
__lowerCamelCase = attention_dropout
__lowerCamelCase = activation_dropout
__lowerCamelCase = feat_proj_dropout
__lowerCamelCase = final_dropout
__lowerCamelCase = layerdrop
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = initializer_range
__lowerCamelCase = vocab_size
__lowerCamelCase = num_clusters
__lowerCamelCase = do_stable_layer_norm
__lowerCamelCase = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__lowerCamelCase = apply_spec_augment
__lowerCamelCase = mask_time_prob
__lowerCamelCase = mask_time_length
__lowerCamelCase = mask_time_min_masks
__lowerCamelCase = mask_feature_prob
__lowerCamelCase = mask_feature_length
__lowerCamelCase = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
__lowerCamelCase = num_codevectors_per_group
__lowerCamelCase = num_codevector_groups
__lowerCamelCase = contrastive_logits_temperature
__lowerCamelCase = feat_quantizer_dropout
__lowerCamelCase = num_negatives
__lowerCamelCase = codevector_dim
__lowerCamelCase = proj_codevector_dim
__lowerCamelCase = diversity_loss_weight
# ctc loss
__lowerCamelCase = ctc_loss_reduction
__lowerCamelCase = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
__lowerCamelCase = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
__lowerCamelCase = list(a )
__lowerCamelCase = list(a )
__lowerCamelCase = list(a )
__lowerCamelCase = xvector_output_dim
@property
    def inputs_to_logits_ratio(self):
        """simple docstring"""
        return functools.reduce(operator.mul, self.conv_stride, 1)
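# For the default conv_stride of (5, 2, 2, 2, 2, 2, 2), the property above evaluates to
# 5 * 2**6 = 320: the convolutional feature encoder downsamples the raw waveform by a
# factor of 320, i.e. one feature frame per 320 samples (20 ms of audio at 16 kHz).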
| 237
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
_lowerCAmelCase : int = {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/config.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/config.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json'''
),
'''distilbert-base-uncased-finetuned-sst-2-english''': (
'''https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json'''
),
}
class DistilBertConfig(PretrainedConfig):
    """simple docstring"""
    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }
def __init__( self :Union[str, Any] , snake_case :List[Any]=30_522 , snake_case :Dict=512 , snake_case :int=False , snake_case :int=6 , snake_case :Union[str, Any]=12 , snake_case :Union[str, Any]=768 , snake_case :Union[str, Any]=4 * 768 , snake_case :Dict=0.1 , snake_case :List[Any]=0.1 , snake_case :Dict="gelu" , snake_case :Tuple=0.02 , snake_case :str=0.1 , snake_case :str=0.2 , snake_case :List[str]=0 , **snake_case :Tuple , ):
'''simple docstring'''
A_ : str = vocab_size
A_ : Optional[Any] = max_position_embeddings
A_ : Any = sinusoidal_pos_embds
A_ : List[str] = n_layers
A_ : Dict = n_heads
A_ : Any = dim
A_ : Union[str, Any] = hidden_dim
A_ : Union[str, Any] = dropout
A_ : Tuple = attention_dropout
A_ : Optional[Any] = activation
A_ : List[Any] = initializer_range
A_ : List[str] = qa_dropout
A_ : Union[str, Any] = seq_classif_dropout
super().__init__(**snake_case , pad_token_id=snake_case )
class DistilBertOnnxConfig(OnnxConfig):
    """simple docstring"""
    @property
    def inputs(self):
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
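# Sketch: this OnnxConfig is what the ONNX export entry point consumes, e.g. (assuming the
# `transformers.onnx` CLI shipped with matching library versions):
# python -m transformers.onnx --model=distilbert-base-uncased onnx_output/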
| 300
|
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
_lowerCamelCase : List[str] = logging.get_logger(__name__)
class MultiControlNetModel(ModelMixin):
    '''simple docstring'''
    def __init__(self, controlnets: Union[List[ControlNetModel], Tuple[ControlNetModel]]):
        '''simple docstring'''
        super().__init__()
        self.nets = nn.ModuleList(controlnets)
    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        encoder_hidden_states: torch.Tensor,
        controlnet_cond: List[torch.tensor],
        conditioning_scale: List[float],
        class_labels: Optional[torch.Tensor] = None,
        timestep_cond: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        guess_mode: bool = False,
        return_dict: bool = True,
    ) -> Union[ControlNetOutput, Tuple]:
        '''simple docstring'''
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample, timestep, encoder_hidden_states, image, scale, class_labels, timestep_cond, attention_mask, cross_attention_kwargs, guess_mode, return_dict, )
            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample
        return down_block_res_samples, mid_block_res_sample
    def save_pretrained(
        self,
        save_directory: Union[str, os.PathLike],
        is_main_process: bool = True,
        save_function: Callable = None,
        safe_serialization: bool = False,
        variant: Optional[str] = None,
    ):
        '''simple docstring'''
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save, is_main_process=is_main_process, save_function=save_function, safe_serialization=safe_serialization, variant=variant, )
            idx += 1
            model_path_to_save = model_path_to_save + f'''_{idx}'''
    @classmethod
    def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs):
        '''simple docstring'''
        idx = 0
        controlnets = []
        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)
            idx += 1
            model_path_to_load = pretrained_model_path + f'''_{idx}'''
        logger.info(f'''{len(controlnets)} controlnets loaded from {pretrained_model_path}.''')
        if len(controlnets) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + '_0'}.")
        return cls(controlnets)
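# Usage sketch (hypothetical checkpoint names): stack two ControlNets so that, in the
# forward pass above, each net's residuals are scaled by its own conditioning weight and
# then summed.
# controlnets = [
#     ControlNetModel.from_pretrained("some-org/controlnet-canny"),
#     ControlNetModel.from_pretrained("some-org/controlnet-pose"),
# ]
# multi_controlnet = MultiControlNetModel(controlnets)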
| 282
| 0
|
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score
import datasets
_CITATION = '''\
@inproceedings{kakwani2020indicnlpsuite,
title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},
author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},
year={2020},
booktitle={Findings of EMNLP},
}
'''
_DESCRIPTION = '''\
IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide
variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.
'''
_KWARGS_DESCRIPTION = '''
Compute IndicGLUE evaluation metric associated to each IndicGLUE dataset.
Args:
predictions: list of predictions to score (as int64),
except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32).
references: list of ground truth labels corresponding to the predictions (as int64),
except for \'cvit-mkb-clsr\' where each reference is a vector (of float32).
Returns: depending on the IndicGLUE subset, one or several of:
"accuracy": Accuracy
"f1": F1 score
"precision": Precision@10
Examples:
>>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\') # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\')
>>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'precision@10\': 1.0}
'''
def simple_accuracy(preds, labels):
    """simple docstring"""
    return float((preds == labels).mean())
def acc_and_f1(preds, labels):
    """simple docstring"""
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }
def precision_at_10(en_sentvecs, in_sentvecs):
    """simple docstring"""
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]
    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)
    sim = cdist(in_sentvecs, en_sentvecs, "cosine")
    actual = np.array(range(n))
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())
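# Worked sketch of precision_at_10: `sim` is an (n x n) cosine-distance matrix between the
# mean-centered Indic and English sentence vectors; row i counts as a hit if index i appears
# among its 10 nearest neighbours, and the returned value is the fraction of rows that hit.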
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class IndicGlue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
"You should supply a configuration name selected in "
"[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", "
"\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", "
"\"wiki-ner\"]" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("int64" )
if self.config_name != "cvit-mkb-clsr"
else datasets.Sequence(datasets.Value("float32" ) ),
"references": datasets.Value("int64" )
if self.config_name != "cvit-mkb-clsr"
else datasets.Sequence(datasets.Value("float32" ) ),
} ) , codebase_urls=[] , reference_urls=[] , format="numpy" if self.config_name != "cvit-mkb-clsr" else None , )
    def _compute(self, predictions, references):
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_10(predictions, references)}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in [
            "wnli",
            "copa",
            "sna",
            "csqa",
            "wstp",
            "inltkh",
            "bbca",
            "iitp-mr",
            "iitp-pr",
            "actsa-sc",
            "md",
        ]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                "[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", "
                "\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", "
                "\"wiki-ner\"]" )
| 357
|
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
"169M": 12,
"430M": 24,
"1B5": 24,
"3B": 32,
"7B": 32,
"14B": 40,
}
HIDEN_SIZE_MAPPING = {
"169M": 768,
"430M": 1_024,
"1B5": 2_048,
"3B": 2_560,
"7B": 4_096,
"14B": 5_120,
}
def convert_state_dict(state_dict):
    """simple docstring"""
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(R"blocks\.(\d+)\.att", R"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(R"blocks\.(\d+)\.ffn", R"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key and reshape
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value and reshape
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance and reshape
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")
        if name != "head.weight":
            name = "rwkv." + name
        state_dict[name] = weight
    return state_dict
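# Illustrative rename performed by convert_state_dict (a made-up key, not from a real checkpoint):
#   "blocks.3.att.time_mix_k"  ->  "rwkv.blocks.3.attention.time_mix_key"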
def convert_rmkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    """simple docstring"""
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 50277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)
    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"""`size` should be one of {possible_sizes}, got {size}.""")
    config = RwkvConfig(
        vocab_size=vocab_size, num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size], hidden_size=HIDEN_SIZE_MAPPING[size], )
    config.save_pretrained(output_dir)
    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)
    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))
    if index is not None:
        save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)
    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        "Cleaning up shards. This may error with an OOM error, if this is the case don't worry you still have converted the model.")
    shard_files = list(shards.keys())
    del state_dict
    del shards
    gc.collect()
    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))
    del state_dict
    gc.collect()
    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)
if __name__ == "__main__":
UpperCAmelCase_ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--repo_id", default=None, type=str, required=True, help="Repo ID from which to pull the checkpoint."
)
parser.add_argument(
"--checkpoint_file", default=None, type=str, required=True, help="Name of the checkpoint file in the repo."
)
parser.add_argument(
"--output_dir", default=None, type=str, required=True, help="Where to save the converted model."
)
parser.add_argument(
"--tokenizer_file",
default=None,
type=str,
help="Path to the tokenizer file to use (if not provided, only the model is converted).",
)
parser.add_argument(
"--size",
default=None,
type=str,
help="Size of the model. Will be inferred from the `checkpoint_file` if not passed.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Push to the Hub the converted model.",
)
parser.add_argument(
"--model_name",
default=None,
type=str,
help="Name of the pushed model on the Hub, including the username / organization.",
)
UpperCAmelCase_ : Optional[int] = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
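# Example invocation (repo and checkpoint names are illustrative placeholders; the flags
# come from the argument parser above):
# python convert_rwkv_checkpoint_to_hf.py --repo_id some-user/rwkv-checkpoints \
#     --checkpoint_file rwkv-430M.pth --output_dir ./rwkv-430m --size 430M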
| 198
| 0
|
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
    def __init__(self, components: Collection[float] | None = None):
        '''simple docstring'''
        if components is None:
            components = []
        self.__components = list(components)
    def __len__(self):
        '''simple docstring'''
        return len(self.__components)
    def __str__(self):
        '''simple docstring'''
        return "(" + ",".join(map(str, self.__components)) + ")"
    def __add__(self, other: Vector):
        '''simple docstring'''
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        else:
            raise Exception("""must have the same size""")
    def __sub__(self, other: Vector):
        '''simple docstring'''
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        else:  # error case
            raise Exception("""must have the same size""")
    @overload
    def __mul__(self, other: float):
        '''simple docstring'''
        ...
    @overload
    def __mul__(self, other: Vector):
        '''simple docstring'''
        ...
    def __mul__(self, other: float | Vector):
        '''simple docstring'''
        if isinstance(other, (float, int)):
            ans = [c * other for c in self.__components]
            return Vector(ans)
        elif isinstance(other, Vector) and len(self) == len(other):
            size = len(self)
            prods = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(prods)
        else:  # error case
            raise Exception("""invalid operand!""")
    def copy(self):
        '''simple docstring'''
        return Vector(self.__components)
    def component(self, i: int):
        '''simple docstring'''
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception("""index out of range""")
    def change_component(self, pos: int, value: float):
        '''simple docstring'''
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value
    def euclidean_length(self):
        '''simple docstring'''
        if len(self.__components) == 0:
            raise Exception("""Vector is empty""")
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))
    def angle(self, other: Vector, deg: bool = False):
        '''simple docstring'''
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)
def zero_vector(dimension: int) -> Vector:
    assert isinstance(dimension, int)
    return Vector([0] * dimension)
def unit_basis_vector(dimension: int, pos: int) -> Vector:
    assert isinstance(dimension, int) and (isinstance(pos, int))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)
def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
    assert (
        isinstance(x, Vector)
        and isinstance(y, Vector)
        and (isinstance(scalar, (int, float)))
    )
    return x * scalar + y
def random_vector(n: int, a: int, b: int) -> Vector:
    random.seed(None)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)
class Matrix:
    def __init__(self, matrix: list[list[float]], w: int, h: int):
        '''simple docstring'''
        self.__matrix = matrix
        self.__width = w
        self.__height = h
    def __str__(self):
        '''simple docstring'''
        ans = """"""
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans
    def __add__(self, other: Matrix):
        '''simple docstring'''
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("""matrix must have the same dimension!""")
    def __sub__(self, other: Matrix):
        '''simple docstring'''
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("""matrices must have the same dimension!""")
    @overload
    def __mul__(self, other: float):
        '''simple docstring'''
        ...
    @overload
    def __mul__(self, other: Vector):
        '''simple docstring'''
        ...
    def __mul__(self, other: float | Vector):
        '''simple docstring'''
        if isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    prods = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(prods))
                return ans
            else:
                raise Exception(
                    """vector must have the same size as the """
                    """number of columns of the matrix!""")
        elif isinstance(other, (int, float)):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        return None
    def height(self):
        '''simple docstring'''
        return self.__height
    def width(self):
        '''simple docstring'''
        return self.__width
    def component(self, x: int, y: int):
        '''simple docstring'''
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception("""change_component: indices out of bounds""")
    def change_component(self, x: int, y: int, value: float):
        '''simple docstring'''
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("""change_component: indices out of bounds""")
    def minor(self, x: int, y: int):
        '''simple docstring'''
        if self.__height != self.__width:
            raise Exception("""Matrix is not square""")
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()
    def cofactor(self, x: int, y: int):
        '''simple docstring'''
        if self.__height != self.__width:
            raise Exception("""Matrix is not square""")
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception("""Indices out of bounds""")
    def determinant(self):
        '''simple docstring'''
        if self.__height != self.__width:
            raise Exception("""Matrix is not square""")
        if self.__height < 1:
            raise Exception("""Matrix has no element""")
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactor_prods)
def square_zero_matrix(n: int) -> Matrix:
    ans = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)
def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    random.seed(None)
    matrix = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
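# Quick usage sketch of the classes and helpers above:
# v = Vector([1.0, 2.0, 3.0])
# w = unit_basis_vector(3, 1)           # (0,1,0)
# print((v + w).euclidean_length())     # sqrt(1 + 9 + 9) = sqrt(19) ~= 4.359
# m = square_zero_matrix(3)
# print(m * v)                          # the zero vector of size 3, printed as (0,0,0)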
| 228
|
def fizz_buzz(number: int, iterations: int) -> str:
    if not isinstance(iterations, int):
        raise ValueError("""iterations must be defined as integers""")
    if not isinstance(number, int) or not number >= 1:
        raise ValueError("""starting number must be an integer and be more than 0""")
    if not iterations >= 1:
        raise ValueError("""Iterations must be done more than 0 times to play FizzBuzz""")
    out = """"""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)
        # print(out)
        number += 1
        out += " "
    return out
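# Example: fizz_buzz(1, 15) returns
# "1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz "
# (a trailing space is appended after every entry).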
if __name__ == "__main__":
import doctest
doctest.testmod()
| 228
| 1
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self ) -> str:
"""simple docstring"""
UpperCamelCase__ : Optional[int] = [[1, 2, 4], [1, 2, 3, 4]]
UpperCamelCase__ : str = DisjunctiveConstraint(__magic_name__ )
self.assertTrue(isinstance(dc.token_ids, __magic_name__ ) )
with self.assertRaises(__magic_name__ ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(__magic_name__ ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def UpperCamelCase__ ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase__ : str = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(__magic_name__ ):
DisjunctiveConstraint(__magic_name__ ) # fails here
def UpperCamelCase__ ( self ) -> str:
"""simple docstring"""
UpperCamelCase__ : str = [[1, 2, 3], [1, 2, 4]]
UpperCamelCase__ : Union[str, Any] = DisjunctiveConstraint(__magic_name__ )
UpperCamelCase__ : Tuple = dc.update(1 )
UpperCamelCase__ : Any = stepped is True and completed is False and reset is False
self.assertTrue(__magic_name__ )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
UpperCamelCase__ : Union[str, Any] = dc.update(2 )
UpperCamelCase__ : Dict = stepped is True and completed is False and reset is False
self.assertTrue(__magic_name__ )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
UpperCamelCase__ : List[Any] = dc.update(3 )
UpperCamelCase__ : Dict = stepped is True and completed is True and reset is False
self.assertTrue(__magic_name__ )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def UpperCamelCase__ ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase__ : Tuple = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
UpperCamelCase__ : Union[str, Any] = DisjunctiveConstraint(__magic_name__ )
UpperCamelCase__ : Tuple = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
UpperCamelCase__ : Optional[Any] = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
UpperCamelCase__ : Union[str, Any] = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
UpperCamelCase__ : int = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
UpperCamelCase__ : Optional[Any] = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
UpperCamelCase__ : Any = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
UpperCamelCase__ : str = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
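# Semantics exercised above: a DisjunctiveConstraint over [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
# is fulfilled as soon as generation matches any one branch, so after the shared prefix
# [1, 2] an update(5) completes the short branch immediately, while update(4) keeps the
# longer branch alive until update(5) completes it.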
| 366
|
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
client = WebClient(token=os.environ['CI_SLACK_BOT_TOKEN'])
def lowerCAmelCase_ ( __UpperCAmelCase: str ) -> Optional[Any]:
UpperCamelCase__ : Any = test_results.split(''' ''' )
UpperCamelCase__ : Dict = 0
UpperCamelCase__ : int = 0
# When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
UpperCamelCase__ : List[Any] = expressions[-2] if '''=''' in expressions[-1] else expressions[-1]
for i, expression in enumerate(__UpperCAmelCase ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
def lowerCAmelCase_ ( __UpperCAmelCase: List[str] ) -> Tuple:
UpperCamelCase__ : List[Any] = {}
UpperCamelCase__ : Optional[Any] = None
UpperCamelCase__ : int = False
for line in failures_short_lines.split('''\n''' ):
if re.search(r'''_ \[doctest\]''' , __UpperCAmelCase ):
UpperCamelCase__ : Any = True
UpperCamelCase__ : Optional[Any] = line.split(''' ''' )[2]
elif in_error and not line.split(''' ''' )[0].isdigit():
UpperCamelCase__ : List[Any] = line
UpperCamelCase__ : List[Any] = False
return failures
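# Sketch of the parsing above: in pytest's failures_short output a doctest failure header
# looks like "____ [doctest] some.dotted.test_name ____"; the function maps that dotted name
# to the first subsequent line whose leading token is not a line number.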
class lowercase__ :
'''simple docstring'''
def __init__( self, __magic_name__, __magic_name__ ) -> List[str]:
"""simple docstring"""
UpperCamelCase__ : Dict = title
UpperCamelCase__ : Tuple = doc_test_results['''time_spent'''].split(''',''' )[0]
UpperCamelCase__ : Optional[Any] = doc_test_results['''success''']
UpperCamelCase__ : str = doc_test_results['''failures''']
UpperCamelCase__ : str = self.n_success + self.n_failures
# Failures and success of the modeling tests
UpperCamelCase__ : List[Any] = doc_test_results
@property
def UpperCamelCase__ ( self ) -> str:
"""simple docstring"""
UpperCamelCase__ : List[Any] = [self._time_spent]
UpperCamelCase__ : str = 0
for time in time_spent:
UpperCamelCase__ : List[Any] = time.split(''':''' )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(__magic_name__ ) == 1:
UpperCamelCase__ : List[Any] = [0, 0, time_parts[0]]
UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ : int = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 3600 + minutes * 60 + seconds
UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ : List[str] = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
return f"{int(__magic_name__ )}h{int(__magic_name__ )}m{int(__magic_name__ )}s"
@property
def UpperCamelCase__ ( self ) -> Dict:
"""simple docstring"""
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def UpperCamelCase__ ( self ) -> Dict:
"""simple docstring"""
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
@property
def UpperCamelCase__ ( self ) -> Dict:
"""simple docstring"""
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"
f" {self.time}."
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
@property
def UpperCamelCase__ ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase__ : List[Any] = 40
UpperCamelCase__ : Tuple = {k: v['''failed'''] for k, v in doc_test_results.items() if isinstance(__magic_name__, __magic_name__ )}
UpperCamelCase__ : List[str] = ''''''
for category, failures in category_failures.items():
if len(__magic_name__ ) == 0:
continue
if report != "":
report += "\n\n"
report += f"*{category} failures*:".ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(__magic_name__ )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f"The following examples had failures:\n\n\n{report}\n",
},
}
@property
def UpperCamelCase__ ( self ) -> str:
"""simple docstring"""
UpperCamelCase__ : str = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(__magic_name__ )
@staticmethod
def UpperCamelCase__ ( ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase__ : Optional[Any] = [
{
'''type''': '''section''',
'''text''': {
'''type''': '''plain_text''',
'''text''': '''There was an issue running the tests.''',
},
'''accessory''': {
'''type''': '''button''',
'''text''': {'''type''': '''plain_text''', '''text''': '''Check Action results''', '''emoji''': True},
'''url''': f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
]
print('''Sending the following payload''' )
        print(json.dumps({'''blocks''': payload} ) )
        client.chat_postMessage(
            channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''], text='''There was an issue running the tests.''', blocks=payload, )
    def post ( self ) -> Union[str, Any]:
        """simple docstring"""
        print('''Sending the following payload''' )
        print(json.dumps({'''blocks''': json.loads(self.payload )} ) )
        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else '''All tests passed.'''
        self.thread_ts = client.chat_postMessage(
            channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''], blocks=self.payload, text=text, )
    def get_reply_blocks ( self, job_name, job_link, failures, text ) -> Optional[Any]:
        """simple docstring"""
        failures_text = ''''''
        for key, value in failures.items():
            value = value[:200] + ''' [Truncated]''' if len(value ) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"
        title = job_name
        content = {'''type''': '''section''', '''text''': {'''type''': '''mrkdwn''', '''text''': text}}
        if job_link is not None:
            content['''accessory'''] = {
                '''type''': '''button''',
                '''text''': {'''type''': '''plain_text''', '''text''': '''GitHub Action job''', '''emoji''': True},
                '''url''': job_link,
            }
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
    def post_reply ( self ) -> Dict:
        """simple docstring"""
        if self.thread_ts is None:
            raise ValueError('''Can only post reply if a post has been made.''' )
        job_link = self.doc_test_results.pop('''job_link''' )
        self.doc_test_results.pop('''failures''' )
        self.doc_test_results.pop('''success''' )
        self.doc_test_results.pop('''time_spent''' )
        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t : t[0] )
        for job, job_result in sorted_dict:
            if len(job_result['''failures'''] ):
                text = f"*Num failures* :{len(job_result['failed'] )} \n"
                failures = job_result['''failures''']
                blocks = self.get_reply_blocks(job, job_link, failures, text=text )
                print('''Sending the following reply''' )
                print(json.dumps({'''blocks''': blocks} ) )
                client.chat_postMessage(
                    channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''], text=f"Results for {job}", blocks=blocks, thread_ts=self.thread_ts['''ts'''], )
                time.sleep(1 )
def get_job_links ( ) -> Dict:
    run_id = os.environ['''GITHUB_RUN_ID''']
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url ).json()
    jobs = {}
    try:
        jobs.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
        pages_to_iterate_over = math.ceil((result['''total_count'''] - 100) / 100 )
        for i in range(pages_to_iterate_over ):
            result = requests.get(url + f"&page={i + 2}" ).json()
            jobs.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
        return jobs
    except Exception as e:
        print('''Unknown error, could not fetch links.''' , e )
        return {}
def retrieve_artifact ( name: str ) -> List[Any]:
    _artifact = {}
    if os.path.exists(name ):
        files = os.listdir(name )
        for file in files:
            try:
                with open(os.path.join(name , file ) , encoding='''utf-8''' ) as f:
                    _artifact[file.split('''.''' )[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(name , file )}." ) from e
    return _artifact
def retrieve_available_artifacts ( ) -> str:
    class Artifact:
        '''simple docstring'''
        def __init__( self, name ) -> Tuple:
            """simple docstring"""
            self.name = name
            self.paths = []
        def __str__( self ) -> Tuple:
            """simple docstring"""
            return self.name
        def add_path ( self, path ) -> Union[str, Any]:
            """simple docstring"""
            self.paths.append({'''name''': self.name, '''path''': path} )
    _available_artifacts: Dict[str, Artifact] = {}
    directories = filter(os.path.isdir , os.listdir() )
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name )
        _available_artifacts[artifact_name].add_path(directory )
    return _available_artifacts
if __name__ == "__main__":
    github_actions_job_links = get_job_links()
    available_artifacts = retrieve_available_artifacts()
    docs = collections.OrderedDict(
[
('*.py', 'API Examples'),
('*.md', 'MD Examples'),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
v: {
'failed': [],
'failures': {},
}
for v in docs.values()
}
# Link to the GitHub Action job
    doc_test_results['job_link'] = github_actions_job_links.get('run_doctests')
    artifact_path = available_artifacts['doc_tests_gpu_test_reports'].paths[0]
    artifact = retrieve_artifact(artifact_path['name'])
if "stats" in artifact:
        failed, success, time_spent = handle_test_results(artifact['stats'])
        doc_test_results['failures'] = failed
        doc_test_results['success'] = success
        doc_test_results['time_spent'] = time_spent[1:-1] + ', '
        all_failures = extract_first_line_failure(artifact['failures_short'])
for line in artifact["summary_short"].split('\n'):
if re.search('FAILED', line):
                line = line.replace('FAILED ', '')
                line = line.split()[0].replace('\n', '')
                if "::" in line:
                    file_path, test = line.split('::')
                else:
                    file_path, test = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
                        category = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)
                        failure = all_failures[test] if test in all_failures else 'N/A'
                        doc_test_results[category]['failures'][test] = failure
break
    message = Message('🤗 Results of the doc tests.', doc_test_results)
message.post()
message.post_reply()
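# A hedged sketch of the FAILED-line bucketing above: pytest's short summary emits
# lines like "FAILED docs/source/quicktour.md::quicktour.md", and fnmatch maps the
# file part onto the category patterns ('*.py', '*.md'). The helper name is
# illustrative, not part of the original script.
def categorize_failed_line(line, patterns):
    line = line.replace("FAILED ", "").split()[0]
    file_path, test = line.split("::") if "::" in line else (line, line)
    for file_regex, category in patterns.items():
        if fnmatch(file_path, file_regex):
            return category, test
    return None, test
# categorize_failed_line("FAILED docs/quicktour.md::quicktour.md", {"*.md": "MD Examples"})
# -> ("MD Examples", "quicktour.md")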
| 247
| 0
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
lowercase__ : int = None
lowercase__ : str = logging.get_logger(__name__)
lowercase__ : Optional[int] = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
lowercase__ : Dict = {
"vocab_file": {
"facebook/mbart-large-en-ro": (
"https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
),
"facebook/mbart-large-cc25": (
"https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
"facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
},
}
lowercase__ : List[Any] = {
"facebook/mbart-large-en-ro": 1024,
"facebook/mbart-large-cc25": 1024,
}
# fmt: off
lowercase__ : List[Any] = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = ['''input_ids''', '''attention_mask''']
lowerCAmelCase_ = MBartTokenizer
lowerCAmelCase_ = []
lowerCAmelCase_ = []
    def __init__( self : List[Any] , vocab_file : str=None , tokenizer_file : Optional[int]=None , bos_token : int="<s>" , eos_token : List[str]="</s>" , sep_token : Union[str, Any]="</s>" , cls_token : List[Any]="<s>" , unk_token : Optional[Any]="<unk>" , pad_token : List[str]="<pad>" , mask_token : Optional[int]="<mask>" , src_lang : List[Any]=None , tgt_lang : List[str]=None , additional_special_tokens : Optional[Any]=None , **kwargs : Any , ):
        """simple docstring"""
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            vocab_file=vocab_file , tokenizer_file=tokenizer_file , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , src_lang=src_lang , tgt_lang=tgt_lang , additional_special_tokens=additional_special_tokens , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens] )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens} )
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code ) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.convert_tokens_to_ids(self._src_lang )
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang )
@property
    def src_lang ( self : Tuple ):
        """simple docstring"""
        return self._src_lang
    @src_lang.setter
    def src_lang ( self : List[Any] , new_src_lang : str ):
        """simple docstring"""
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )
    def build_inputs_with_special_tokens ( self : Any , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ):
        """simple docstring"""
        if token_ids_b is None:
            return self.prefix_tokens + token_ids_a + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_b + self.suffix_tokens
    def create_token_type_ids_from_sequences ( self : int , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
    def _build_translation_inputs ( self : int , raw_inputs : str , return_tensors : str , src_lang : Optional[str] , tgt_lang : Optional[str] , **extra_kwargs : Optional[Any] ):
        """simple docstring"""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
        self.src_lang = src_lang
        inputs = self(raw_inputs , add_special_tokens=True , return_tensors=return_tensors , **extra_kwargs )
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang )
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
    def prepare_seq2seq_batch ( self : Optional[int] , src_texts : List[str] , src_lang : str = "en_XX" , tgt_texts : Optional[List[str]] = None , tgt_lang : str = "ro_RO" , **kwargs : List[Any] , ):
        """simple docstring"""
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts , tgt_texts , **kwargs )
    def _switch_to_input_mode ( self : Optional[Any] ):
        """simple docstring"""
        return self.set_src_lang_special_tokens(self.src_lang )
    def _switch_to_target_mode ( self : Optional[Any] ):
        """simple docstring"""
        return self.set_tgt_lang_special_tokens(self.tgt_lang )
    def set_src_lang_special_tokens ( self : int , src_lang : str ):
        """simple docstring"""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang )
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens )
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens )
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
    def set_tgt_lang_special_tokens ( self : str , tgt_lang : str ):
        """simple docstring"""
        self.cur_lang_code = self.convert_tokens_to_ids(tgt_lang )
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens )
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens )
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
    def save_vocabulary ( self : Dict , save_directory : str , filename_prefix : Optional[str] = None ):
        """simple docstring"""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer." )
        if not os.path.isdir(save_directory ):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory." )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
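# A usage sketch (not from the original file; checkpoint name taken from the maps
# above): the fast tokenizer renders source text as "tokens </s> src_lang_code",
# so the last two ids of a batch row are the eos token and the language code.
# tokenizer = MBartTokenizerFast.from_pretrained("facebook/mbart-large-en-ro")
# tokenizer.src_lang = "en_XX"
# batch = tokenizer("UN Chief Says There Is No Military Solution", return_tensors="pt")
# batch["input_ids"][0, -2:]  # -> ids of </s> and en_XX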
| 187
|
from __future__ import annotations
lowercase__ : str = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def search ( grid , init , goal , cost , heuristic , ):
    '''simple docstring'''
    closed = [
        [0 for col in range(len(grid[0] ) )] for row in range(len(grid ) )
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0] ) )] for row in range(len(grid ) )
    ]  # the action grid
    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]
    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand
    while not found and not resign:
        if len(cell ) == 0:
            raise ValueError("Algorithm is unable to find solution" )
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]
            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS ) ):  # to try out different valid actions
                    xa = x + DIRECTIONS[i][0]
                    ya = y + DIRECTIONS[i][1]
                    if xa >= 0 and xa < len(grid ) and ya >= 0 and ya < len(grid[0] ):
                        if closed[xa][ya] == 0 and grid[xa][ya] == 0:
                            ga = g + cost
                            fa = ga + heuristic[xa][ya]
                            cell.append([fa, ga, xa, ya] )
                            closed[xa][ya] = 1
                            action[xa][ya] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y] )  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        xa = x - DIRECTIONS[action[x][y]][0]
        ya = y - DIRECTIONS[action[x][y]][1]
        x = xa
        y = ya
        invpath.append([x, y] )
    path = []
    for i in range(len(invpath ) ):
        path.append(invpath[len(invpath ) - 1 - i] )
    return path, action
if __name__ == "__main__":
    grid = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1
    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99
    path, action = search(grid, init, goal, cost, heuristic)
print("ACTION MAP")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
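# A hedged sketch of the heuristic construction above as a reusable helper:
# plain Manhattan distance to the goal, with obstacle cells bumped to a large
# penalty so the expansion step avoids them. The helper name is illustrative.
def manhattan_heuristic(grid, goal, penalty=99):
    rows, cols = len(grid), len(grid[0])
    h = [[abs(i - goal[0]) + abs(j - goal[1]) for j in range(cols)] for i in range(rows)]
    return [[penalty if grid[i][j] == 1 else h[i][j] for j in range(cols)] for i in range(rows)]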
| 187
| 1
|
'''simple docstring'''
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate ( model_type : str, generator_name_or_path : str, question_encoder_name_or_path : str, dest_dir : Path, config_name_or_path : str = None, generator_tokenizer_name_or_path : str = None, question_encoder_tokenizer_name_or_path : str = None, ):
    if config_name_or_path is None:
        config_name_or_path = '''facebook/rag-token-base''' if model_type == '''rag_token''' else '''facebook/rag-sequence-base'''
    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path
    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path
    model_class = RagTokenForGeneration if model_type == '''rag_token''' else RagSequenceForGeneration
    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path )
    gen_config = AutoConfig.from_pretrained(generator_name_or_path )
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path )
    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config
    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config )
    rag_model.save_pretrained(dest_dir )
    # Sanity check.
    model_class.from_pretrained(dest_dir )
    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path )
    gen_tokenizer.save_pretrained(dest_dir / '''generator_tokenizer/''' )
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path )
    question_encoder_tokenizer.save_pretrained(dest_dir / '''question_encoder_tokenizer/''' )
if __name__ == "__main__":
__snake_case : str = argparse.ArgumentParser()
parser.add_argument(
'--model_type',
choices=['rag_sequence', 'rag_token'],
required=True,
type=str,
help='RAG model type: rag_sequence, rag_token',
)
parser.add_argument('--dest', type=str, required=True, help='Path to the output checkpoint directory.')
parser.add_argument('--generator_name_or_path', type=str, required=True, help='Generator model identifier')
parser.add_argument(
'--question_encoder_name_or_path', type=str, required=True, help='Question encoder model identifier'
)
parser.add_argument(
'--generator_tokenizer_name_or_path',
type=str,
help='Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``',
)
parser.add_argument(
'--question_encoder_tokenizer_name_or_path',
type=str,
help='Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``',
)
parser.add_argument(
'--config_name_or_path',
type=str,
help=(
'Identifier of the model config to use, if not provided, resolves to a base config for a given'
' ``model_type``'
),
)
__snake_case : Optional[int] = parser.parse_args()
__snake_case : List[Any] = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
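# Example invocation (paths and identifiers are illustrative, not from the
# original file):
# python consolidate_rag_checkpoint.py \
#     --model_type rag_sequence \
#     --generator_name_or_path facebook/bart-large-cnn \
#     --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base \
#     --dest ./rag-checkpoint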
| 350
|
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
__snake_case : Optional[int] = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-classification/requirements.txt')
__snake_case : str = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
__snake_case : int = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def pil_loader ( path : str ):
    with open(path, '''rb''' ) as f:
        im = Image.open(f )
        return im.convert('''RGB''' )
@dataclass
class __UpperCAmelCase :
'''simple docstring'''
__lowercase : Optional[str] = field(
default=_UpperCamelCase , metadata={
'help': 'Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub).'
} , )
__lowercase : Optional[str] = field(
default=_UpperCamelCase , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
__lowercase : Optional[str] = field(default=_UpperCamelCase , metadata={'help': 'A folder containing the training data.'} )
__lowercase : Optional[str] = field(default=_UpperCamelCase , metadata={'help': 'A folder containing the validation data.'} )
__lowercase : Optional[float] = field(
default=0.15 , metadata={'help': 'Percent to split off of train for validation.'} )
__lowercase : Optional[int] = field(
default=_UpperCamelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
__lowercase : Optional[int] = field(
default=_UpperCamelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
def __A ( self ) -> int:
if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
raise ValueError(
'''You must specify either a dataset name from the hub or a train and/or validation directory.''' )
@dataclass
class __UpperCAmelCase :
'''simple docstring'''
__lowercase : str = field(
default='google/vit-base-patch16-224-in21k' , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} , )
__lowercase : Optional[str] = field(
default=_UpperCamelCase , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(_UpperCamelCase )} , )
__lowercase : Optional[str] = field(
default=_UpperCamelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
__lowercase : Optional[str] = field(
default=_UpperCamelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'} )
__lowercase : str = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
__lowercase : str = field(default=_UpperCamelCase , metadata={'help': 'Name or path of preprocessor config.'} )
__lowercase : bool = field(
default=_UpperCamelCase , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
__lowercase : bool = field(
default=_UpperCamelCase , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , )
def collate_fn ( examples ) -> Dict:
    pixel_values = torch.stack([example['''pixel_values'''] for example in examples] )
    labels = torch.tensor([example['''labels'''] for example in examples] )
    return {"pixel_values": pixel_values, "labels": labels}
def main ( ) -> Tuple:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('''run_image_classification''', model_args, data_args )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', handlers=[logging.StreamHandler(sys.stdout )], )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
    if data_args.dataset_name is not None:
        dataset = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir, task='''image-classification''', use_auth_token=True if model_args.use_auth_token else None, )
    else:
        data_files = {}
        if data_args.train_dir is not None:
            data_files['''train'''] = os.path.join(data_args.train_dir, '''**''' )
        if data_args.validation_dir is not None:
            data_files['''validation'''] = os.path.join(data_args.validation_dir, '''**''' )
        dataset = load_dataset(
            '''imagefolder''', data_files=data_files, cache_dir=model_args.cache_dir, task='''image-classification''', )
    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if '''validation''' in dataset.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float ) and data_args.train_val_split > 0.0:
        split = dataset['''train'''].train_test_split(data_args.train_val_split )
        dataset['''train'''] = split['''train''']
        dataset['''validation'''] = split['''test''']
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
    labels = dataset['''train'''].features['''labels'''].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels ):
        label2id[label] = str(i )
        id2label[str(i )] = label
    # Load the accuracy metric from the datasets package
    metric = evaluate.load('''accuracy''' )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p : Optional[Any] ):
        return metric.compute(predictions=np.argmax(p.predictions, axis=1 ), references=p.label_ids )
    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path, num_labels=len(labels ), label2id=label2id, id2label=id2label, finetuning_task='''image-classification''', cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    model = AutoModelForImageClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool('''.ckpt''' in model_args.model_name_or_path ), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes, )
    image_processor = AutoImageProcessor.from_pretrained(
        model_args.image_processor_name or model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
A_ = image_processor.size['''shortest_edge''']
else:
A_ = (image_processor.size['''height'''], image_processor.size['''width'''])
A_ = Normalize(mean=image_processor.image_mean, std=image_processor.image_std )
A_ = Compose(
[
RandomResizedCrop(_UpperCamelCase ),
RandomHorizontalFlip(),
ToTensor(),
normalize,
] )
A_ = Compose(
[
Resize(_UpperCamelCase ),
CenterCrop(_UpperCamelCase ),
ToTensor(),
normalize,
] )
    def train_transforms(example_batch : Dict ):
        example_batch['''pixel_values'''] = [
            _train_transforms(pil_img.convert('''RGB''' ) ) for pil_img in example_batch['''image''']
        ]
        return example_batch
    def val_transforms(example_batch : Any ):
        example_batch['''pixel_values'''] = [_val_transforms(pil_img.convert('''RGB''' ) ) for pil_img in example_batch['''image''']]
        return example_batch
if training_args.do_train:
if "train" not in dataset:
raise ValueError('''--do_train requires a train dataset''' )
        if data_args.max_train_samples is not None:
            dataset['''train'''] = (
                dataset['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
            )
        # Set the training transforms
        dataset["train"].set_transform(train_transforms )
if training_args.do_eval:
if "validation" not in dataset:
raise ValueError('''--do_eval requires a validation dataset''' )
        if data_args.max_eval_samples is not None:
            dataset['''validation'''] = (
                dataset['''validation'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
        # Set the validation transforms
        dataset["validation"].set_transform(val_transforms )
    # Initialize our trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=dataset['''train'''] if training_args.do_train else None, eval_dataset=dataset['''validation'''] if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=image_processor, data_collator=collate_fn, )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
trainer.save_model()
trainer.log_metrics('''train''', train_result.metrics )
trainer.save_metrics('''train''', train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics('''eval''', metrics )
        trainer.save_metrics('''eval''', metrics )
# Write model card and (optionally) push to hub
    kwargs = {
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''image-classification''',
'''dataset''': data_args.dataset_name,
'''tags''': ['''image-classification''', '''vision'''],
}
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
if __name__ == "__main__":
main()
| 18
| 0
|
'''simple docstring'''
__lowerCAmelCase : Dict =[
"Audio",
"Array2D",
"Array3D",
"Array4D",
"Array5D",
"ClassLabel",
"Features",
"Sequence",
"Value",
"Image",
"Translation",
"TranslationVariableLanguages",
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
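# A short usage sketch of the exported feature types, assuming the standard
# `datasets` API:
# from datasets import ClassLabel, Features, Value
# features = Features({"text": Value("string"), "label": ClassLabel(names=["neg", "pos"])})
# features["label"].str2int("pos")  # -> 1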
| 237
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCAmelCase ( metaclass=DummyObject ):
__lowercase = ["""note_seq"""]
def __init__( self :Optional[Any] , *lowercase_ :List[Any] , **lowercase_ :List[str] )-> int:
requires_backends(self , ["note_seq"] )
@classmethod
def UpperCAmelCase_ ( cls :str , *lowercase_ :Union[str, Any] , **lowercase_ :Any )-> Optional[int]:
requires_backends(cls , ["note_seq"] )
@classmethod
def UpperCAmelCase_ ( cls :Dict , *lowercase_ :Tuple , **lowercase_ :List[Any] )-> Optional[Any]:
requires_backends(cls , ["note_seq"] )
| 237
| 1
|
'''simple docstring'''
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class __lowerCamelCase ( a_ , unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = "google/mobilebert-uncased"
def A ( self : Union[str, Any]):
super().setUp()
        vocab_tokens = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file , 'w' , encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
        self.tokenizers_list = [
            (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2])  # else the 'google/' prefix is stripped
            for tokenizer_def in self.tokenizers_list
        ]
def A ( self : Tuple , SCREAMING_SNAKE_CASE : Tuple):
_A : Any = 'UNwant\u00E9d,running'
_A : Optional[Any] = 'unwanted, running'
return input_text, output_text
def A ( self : List[str]):
_A : Dict = self.tokenizer_class(self.vocab_file)
_A : Optional[int] = tokenizer.tokenize('UNwant\u00E9d,running')
self.assertListEqual(SCREAMING_SNAKE_CASE , ['un', '##want', '##ed', ',', 'runn', '##ing'])
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE) , [9, 6, 7, 12, 10, 11])
def A ( self : Tuple):
if not self.test_rust_tokenizer:
return
_A : str = self.get_tokenizer()
_A : List[str] = self.get_rust_tokenizer()
_A : Optional[int] = 'UNwant\u00E9d,running'
_A : List[Any] = tokenizer.tokenize(SCREAMING_SNAKE_CASE)
_A : Optional[Any] = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE)
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)
_A : int = tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE)
_A : Dict = rust_tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE)
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)
_A : str = self.get_rust_tokenizer()
_A : Optional[int] = tokenizer.encode(SCREAMING_SNAKE_CASE)
_A : Union[str, Any] = rust_tokenizer.encode(SCREAMING_SNAKE_CASE)
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)
# With lower casing
_A : List[str] = self.get_tokenizer(do_lower_case=SCREAMING_SNAKE_CASE)
_A : Tuple = self.get_rust_tokenizer(do_lower_case=SCREAMING_SNAKE_CASE)
_A : List[str] = 'UNwant\u00E9d,running'
_A : Any = tokenizer.tokenize(SCREAMING_SNAKE_CASE)
_A : str = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE)
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)
_A : int = tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE)
_A : Tuple = rust_tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE)
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)
_A : Tuple = self.get_rust_tokenizer()
_A : Any = tokenizer.encode(SCREAMING_SNAKE_CASE)
_A : str = rust_tokenizer.encode(SCREAMING_SNAKE_CASE)
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)
def A ( self : Any):
_A : int = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz') , ['ah', '\u535A', '\u63A8', 'zz'])
def A ( self : List[Any]):
_A : Tuple = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE)
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ') , ['hello', '!', 'how', 'are', 'you', '?'])
self.assertListEqual(tokenizer.tokenize('H\u00E9llo') , ['hello'])
def A ( self : int):
_A : Any = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE , strip_accents=SCREAMING_SNAKE_CASE)
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ') , ['hällo', '!', 'how', 'are', 'you', '?'])
self.assertListEqual(tokenizer.tokenize('H\u00E9llo') , ['h\u00E9llo'])
def A ( self : Optional[int]):
_A : Optional[Any] = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE , strip_accents=SCREAMING_SNAKE_CASE)
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ') , ['hallo', '!', 'how', 'are', 'you', '?'])
self.assertListEqual(tokenizer.tokenize('H\u00E9llo') , ['hello'])
def A ( self : Optional[int]):
_A : Dict = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE)
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ') , ['hallo', '!', 'how', 'are', 'you', '?'])
self.assertListEqual(tokenizer.tokenize('H\u00E9llo') , ['hello'])
def A ( self : Optional[int]):
_A : str = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE)
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ') , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'])
def A ( self : Optional[Any]):
_A : Tuple = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE , strip_accents=SCREAMING_SNAKE_CASE)
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ') , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'])
def A ( self : Union[str, Any]):
_A : List[Any] = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE , strip_accents=SCREAMING_SNAKE_CASE)
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ') , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'])
def A ( self : Tuple):
_A : List[str] = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE , never_split=['[UNK]'])
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]') , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'])
def A ( self : Optional[int]):
_A : Tuple = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
_A : int = {}
for i, token in enumerate(SCREAMING_SNAKE_CASE):
_A : Any = i
_A : List[str] = WordpieceTokenizer(vocab=SCREAMING_SNAKE_CASE , unk_token='[UNK]')
self.assertListEqual(tokenizer.tokenize('') , [])
self.assertListEqual(tokenizer.tokenize('unwanted running') , ['un', '##want', '##ed', 'runn', '##ing'])
self.assertListEqual(tokenizer.tokenize('unwantedX running') , ['[UNK]', 'runn', '##ing'])
def A ( self : Any):
self.assertTrue(_is_whitespace(' '))
self.assertTrue(_is_whitespace('\t'))
self.assertTrue(_is_whitespace('\r'))
self.assertTrue(_is_whitespace('\n'))
self.assertTrue(_is_whitespace('\u00A0'))
self.assertFalse(_is_whitespace('A'))
self.assertFalse(_is_whitespace('-'))
def A ( self : Optional[int]):
self.assertTrue(_is_control('\u0005'))
self.assertFalse(_is_control('A'))
self.assertFalse(_is_control(' '))
self.assertFalse(_is_control('\t'))
self.assertFalse(_is_control('\r'))
def A ( self : Optional[int]):
self.assertTrue(_is_punctuation('-'))
self.assertTrue(_is_punctuation('$'))
self.assertTrue(_is_punctuation('`'))
self.assertTrue(_is_punctuation('.'))
self.assertFalse(_is_punctuation('A'))
self.assertFalse(_is_punctuation(' '))
def A ( self : int):
_A : str = self.get_tokenizer()
_A : Optional[Any] = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']])
@slow
def A ( self : int):
_A : Optional[int] = self.tokenizer_class.from_pretrained('google/mobilebert-uncased')
_A : Tuple = tokenizer.encode('sequence builders' , add_special_tokens=SCREAMING_SNAKE_CASE)
_A : Dict = tokenizer.encode('multi-sequence build' , add_special_tokens=SCREAMING_SNAKE_CASE)
_A : Optional[int] = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE)
_A : Tuple = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def A ( self : Optional[int]):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})'):
_A : Optional[int] = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE)
_A : int = F'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'
_A : Optional[int] = tokenizer_r.encode_plus(
SCREAMING_SNAKE_CASE , return_attention_mask=SCREAMING_SNAKE_CASE , return_token_type_ids=SCREAMING_SNAKE_CASE , return_offsets_mapping=SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE , )
_A : int = tokenizer_r.do_lower_case if hasattr(SCREAMING_SNAKE_CASE , 'do_lower_case') else False
_A : List[str] = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'Allen'),
((21, 23), '##NL'),
((23, 24), '##P'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'allen'),
((21, 23), '##nl'),
((23, 24), '##p'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids']))
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'])
def A ( self : str):
_A : Dict = ['的', '人', '有']
_A : Union[str, Any] = ''.join(SCREAMING_SNAKE_CASE)
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})'):
_A : Optional[int] = True
_A : Optional[Any] = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE)
_A : List[Any] = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE)
_A : str = tokenizer_p.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE)
_A : List[Any] = tokenizer_r.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE)
_A : Dict = tokenizer_r.convert_ids_to_tokens(SCREAMING_SNAKE_CASE)
_A : List[Any] = tokenizer_p.convert_ids_to_tokens(SCREAMING_SNAKE_CASE)
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)
_A : List[str] = False
_A : int = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE)
_A : int = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE)
_A : List[Any] = tokenizer_r.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE)
_A : Dict = tokenizer_p.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE)
_A : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(SCREAMING_SNAKE_CASE)
_A : int = tokenizer_p.convert_ids_to_tokens(SCREAMING_SNAKE_CASE)
# it is expected that only the first Chinese character is not preceded by "##".
_A : Optional[Any] = [
F'##{token}' if idx != 0 else token for idx, token in enumerate(SCREAMING_SNAKE_CASE)
]
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)
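# A runnable sketch of the greedy longest-match-first rule the wordpiece tests
# above exercise (simplified from BERT's WordpieceTokenizer; illustrative only):
def greedy_wordpiece(word, vocab, unk="[UNK]"):
    pieces, start = [], 0
    while start < len(word):
        end = len(word)
        while end > start:
            piece = word[start:end] if start == 0 else "##" + word[start:end]
            if piece in vocab:
                break
            end -= 1
        else:
            return [unk]  # no prefix of the remainder is in the vocabulary
        pieces.append(piece)
        start = end
    return pieces
# greedy_wordpiece("unwanted", {"un", "##want", "##ed"}) -> ['un', '##want', '##ed']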
| 362
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_electra''': ['''ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ElectraConfig''', '''ElectraOnnxConfig'''],
'''tokenization_electra''': ['''ElectraTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_electra_fast'''] = ['''ElectraTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_electra'''] = [
'''ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ElectraForCausalLM''',
'''ElectraForMaskedLM''',
'''ElectraForMultipleChoice''',
'''ElectraForPreTraining''',
'''ElectraForQuestionAnswering''',
'''ElectraForSequenceClassification''',
'''ElectraForTokenClassification''',
'''ElectraModel''',
'''ElectraPreTrainedModel''',
'''load_tf_weights_in_electra''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_electra'''] = [
'''TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFElectraForMaskedLM''',
'''TFElectraForMultipleChoice''',
'''TFElectraForPreTraining''',
'''TFElectraForQuestionAnswering''',
'''TFElectraForSequenceClassification''',
'''TFElectraForTokenClassification''',
'''TFElectraModel''',
'''TFElectraPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_electra'''] = [
'''FlaxElectraForCausalLM''',
'''FlaxElectraForMaskedLM''',
'''FlaxElectraForMultipleChoice''',
'''FlaxElectraForPreTraining''',
'''FlaxElectraForQuestionAnswering''',
'''FlaxElectraForSequenceClassification''',
'''FlaxElectraForTokenClassification''',
'''FlaxElectraModel''',
'''FlaxElectraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
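# The _LazyModule above defers heavy framework imports until attribute access.
# A minimal sketch of the same idea with module-level __getattr__ (PEP 562),
# illustrative only:
# import importlib
# def __getattr__(name):
#     for module, names in _import_structure.items():
#         if name in names:
#             return getattr(importlib.import_module(f".{module}", __name__), name)
#     raise AttributeError(f"module {__name__!r} has no attribute {name!r}")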
| 227
| 0
|
"""simple docstring"""
from pathlib import Path
import fire
from tqdm import tqdm
def UpperCAmelCase ( UpperCAmelCase="ro" , UpperCAmelCase="en" , UpperCAmelCase="wmt16" , UpperCAmelCase=None ) -> None:
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError('run pip install datasets' )
snake_case_ = f'{src_lang}-{tgt_lang}'
print(f'Converting {dataset}-{pair}' )
snake_case_ = datasets.load_dataset(UpperCAmelCase , UpperCAmelCase )
if save_dir is None:
snake_case_ = f'{dataset}-{pair}'
snake_case_ = Path(UpperCAmelCase )
save_dir.mkdir(exist_ok=UpperCAmelCase )
for split in ds.keys():
print(f'Splitting {split} with {ds[split].num_rows} records' )
# to save to val.source, val.target like summary datasets
snake_case_ = 'val' if split == 'validation' else split
snake_case_ = save_dir.joinpath(f'{fn}.source' )
snake_case_ = save_dir.joinpath(f'{fn}.target' )
snake_case_ = src_path.open('w+' )
snake_case_ = tgt_path.open('w+' )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
snake_case_ = x['translation']
src_fp.write(ex[src_lang] + '\n' )
tgt_fp.write(ex[tgt_lang] + '\n' )
print(f'Saved {dataset} dataset to {save_dir}' )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
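# Example CLI usage via fire (script name and arguments are illustrative):
# python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16 --save_dir wmt16-ro-en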
| 69
|
'''simple docstring'''
import unittest
from knapsack import knapsack as k
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    def test_base_case( self ):
        cap = 0
        val = [0]
        w = [0]
        c = len(val )
        self.assertEqual(k.knapsack(cap , w , val , c ) , 0 )
        val = [60]
        w = [10]
        c = len(val )
        self.assertEqual(k.knapsack(cap , w , val , c ) , 0 )
    def test_easy_case( self ):
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val )
        self.assertEqual(k.knapsack(cap , w , val , c ) , 5 )
    def test_knapsack( self ):
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val )
        self.assertEqual(k.knapsack(cap , w , val , c ) , 220 )
if __name__ == "__main__":
unittest.main()
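# For reference, a hedged sketch of the 0/1-knapsack recurrence the tests above
# target, with the same argument order as the calls (capacity, weights, values,
# count); the name is illustrative, not the module under test:
def knapsack_sketch(cap, w, val, c):
    dp = [[0] * (cap + 1) for _ in range(c + 1)]
    for i in range(1, c + 1):
        for j in range(1, cap + 1):
            dp[i][j] = dp[i - 1][j]  # skip item i-1
            if w[i - 1] <= j:  # or take it, if it fits
                dp[i][j] = max(dp[i][j], dp[i - 1][j - w[i - 1]] + val[i - 1])
    return dp[c][cap]
# knapsack_sketch(50, [10, 20, 30], [60, 100, 120], 3) -> 220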
| 198
| 0
|
from itertools import product
from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros
def gen_gaussian_kernel( k_size , sigma ):
    '''simple docstring'''
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x ) + square(y )) / (2 * square(sigma )) )
    return g
def gaussian_filter( image , k_size , sigma ):
    '''simple docstring'''
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1
    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size) )
    row = 0
    for i, j in product(range(dst_height ) , range(dst_width ) ):
        window = ravel(image[i : i + k_size, j : j + k_size] )
        image_array[row, :] = window
        row += 1
    # turn the kernel into shape(k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size , sigma )
    filter_array = ravel(gaussian_kernel )
    # reshape and get the dst image
    dst = dot(image_array , filter_array ).reshape(dst_height , dst_width ).astype(uint8 )
    return dst
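# Shape check for the im2col step above (illustrative): a 5x5 image with a 3x3
# kernel gives dst_height = dst_width = 3, so image_array is (9, 9), the
# flattened kernel is (9,), and the dot product reshapes back to (3, 3).
# gaussian_filter(zeros((5, 5)), 3, sigma=1).shape  # -> (3, 3)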
if __name__ == "__main__":
# read original image
lowerCAmelCase = imread(R'''../image_data/lena.jpg''')
# turn image in gray scale value
lowerCAmelCase = cvtColor(img, COLOR_BGR2GRAY)
# get values with two different mask size
lowerCAmelCase = gaussian_filter(gray, 3, sigma=1)
lowerCAmelCase = gaussian_filter(gray, 5, sigma=0.8)
# show result images
imshow('''gaussian filter with 3x3 mask''', gaussianaxa)
imshow('''gaussian filter with 5x5 mask''', gaussianaxa)
waitKey()
| 304
|
from math import factorial, radians
def maclaurin_sin( angle_in_degrees , accuracy = 18 , rounded_values_count = 10 ) -> float:
    '''simple docstring'''
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees )
    result = angle_in_radians
    a = 3
    b = -1
    for _ in range(accuracy ):
        result += (b * (angle_in_radians**a)) / factorial(a )
        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.
    return round(result , rounded_values_count )
if __name__ == "__main__":
__import__('''doctest''').testmod()
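# Worked example of the series above: sin(30 deg) uses x - x^3/3! + x^5/5! - ...
# with x = pi/6 ~ 0.5235988, which converges to 0.5.
# maclaurin_sin(30)  # -> 0.5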
| 304
| 1
|
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class UpperCAmelCase_ ( A_ ):
"""simple docstring"""
UpperCamelCase_ : Dict ='Speech2TextFeatureExtractor'
UpperCamelCase_ : Dict ='Speech2TextTokenizer'
    def __init__( self , feature_extractor , tokenizer ):
        super().__init__(feature_extractor , tokenizer )
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def __call__( self , *args , **kwargs ):
        if self._in_target_context_manager:
            return self.current_processor(*args , **kwargs )
        if "raw_speech" in kwargs:
            warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' )
            audio = kwargs.pop('''raw_speech''' )
        else:
            audio = kwargs.pop('''audio''' , None )
        sampling_rate = kwargs.pop('''sampling_rate''' , None )
        text = kwargs.pop('''text''' , None )
        if len(args ) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
        if audio is not None:
            inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs )
        if text is not None:
            encodings = self.tokenizer(text , **kwargs )
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs['''labels'''] = encodings['''input_ids''']
            return inputs
    def batch_decode ( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode ( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
@contextmanager
    def as_target_processor ( self ):
warnings.warn(
'''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
'''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
'''your audio inputs, or in a separate call.''' )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
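# A usage sketch (hedged; checkpoint name and waveform are illustrative): audio
# and text can go through the processor in one call, and the tokenized
# transcript lands under "labels".
# processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
# inputs = processor(audio=waveform, sampling_rate=16000, text="a transcript")
# sorted(inputs.keys())  # includes input_features and labels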
| 259
|
"""simple docstring"""
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
    _is_native_amp_available = True
from torch.cuda.amp import autocast
SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config we are going to pretrain."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    verbose_logging: Optional[bool] = field(
        default=False,
        metadata={"help": "Whether to log verbose messages or not."},
    )
    max_gumbel_temperature: Optional[float] = field(
        default=2.0, metadata={"help": "Maximum temperature for gumbel softmax."}
    )
    min_gumbel_temperature: Optional[float] = field(
        default=0.5, metadata={"help": "Minimum temperature for gumbel softmax."}
    )
    gumbel_temperature_decay: Optional[float] = field(
        default=0.999995, metadata={"help": "Decay of gumbel temperature during training."}
    )
def configure_logger(model_args: "ModelArguments", training_args: TrainingArguments) -> None:
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank):
        logging_level = logging.INFO
    logger.setLevel(logging_level)
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    dataset_name: str = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    validation_split_name: Optional[str] = field(
        default="validation",
        metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    speech_file_column: Optional[str] = field(
        default="file",
        metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    validation_split_percentage: Optional[int] = field(
        default=1,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_duration_in_seconds: Optional[float] = field(
        default=20.0, metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"}
    )
@dataclass
class DataCollatorForWav2Vec2Pretraining:
    """Pads received inputs and computes the randomly masked time indices needed for pretraining."""

    model: Wav2Vec2ForPreTraining
    feature_extractor: Wav2Vec2FeatureExtractor
    padding: Union[bool, str] = "longest"
    pad_to_multiple_of: Optional[int] = None
    max_length: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # reformat list of dicts to a padded batch in pytorch format
        batch = self.feature_extractor.pad(
            features,
            max_length=self.max_length,
            padding=self.padding,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1])
        batch_size = batch["input_values"].shape[0]

        # make sure that no loss is computed on padded inputs
        attention_mask = None
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to(
                torch.long
            )
            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device
            )
            # these two operations make sure that all values
            # before the output lengths indices are attended to
            attention_mask[
                (torch.arange(attention_mask.shape[0], device=attention_mask.device), output_lengths - 1)
            ] = 1
            attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()

        # sample randomly masked indices
        batch["mask_time_indices"] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length),
            self.model.config.mask_time_prob,
            self.model.config.mask_time_length,
            attention_mask=attention_mask,
            min_masks=2,
        )
        return batch
class Wav2Vec2PreTrainer(Trainer):
    """A `Trainer` subclass that decays the gumbel-softmax temperature after every update step."""

    def __init__(self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs):
        super().__init__(*args, **kwargs)
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay

    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["mask_time_indices"]).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )
        return loss.detach()
def main() -> None:
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args, training_args)

    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)

    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]",
            cache_dir=model_args.cache_dir,
        )
    else:
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split="validation",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}",
            cache_dir=model_args.cache_dir,
        )

    # only normalized-inputs-training is supported
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=True
    )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        batch["speech"], _ = librosa.load(batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate)
        return batch

    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets["train"].column_names
    )

    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data: len(data["speech"]) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate)
    )

    def normalize(batch):
        return feature_extractor(batch["speech"], sampling_rate=feature_extractor.sampling_rate)

    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        load_from_cache_file=not data_args.overwrite_cache,
        remove_columns=vectorized_datasets["train"].column_names,
    )

    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = Wav2Vec2Config.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        gradient_checkpointing=training_args.gradient_checkpointing,
    )

    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
            " ``config.feat_extract_norm='layer'"
        )

    model = Wav2Vec2ForPreTraining(config)
    data_collator = DataCollatorForWav2Vec2Pretraining(model=model, feature_extractor=feature_extractor)
    trainer = Wav2Vec2PreTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        train_dataset=vectorized_datasets["train"],
        eval_dataset=vectorized_datasets["validation"],
        tokenizer=feature_extractor,
        max_gumbel_temp=model_args.max_gumbel_temperature,
        min_gumbel_temp=model_args.min_gumbel_temperature,
        gumbel_temp_decay=model_args.gumbel_temperature_decay,
    )
    trainer.train()
if __name__ == "__main__":
main()
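# --- Illustrative launch command (editor's sketch) ---
# The script filename, dataset name, and batch size below are assumptions; any
# dataset with a column of audio file paths should work.
#
#   python run_pretrain.py \
#       --model_name_or_path facebook/wav2vec2-base \
#       --dataset_name librispeech_asr \
#       --dataset_config_name clean \
#       --output_dir ./wav2vec2-pretrained \
#       --per_device_train_batch_size 8 \
#       --do_train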
| 247
| 0
|
"""simple docstring"""
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
logger = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    """Trainer with calibration, quantization-aware evaluation, and ONNX export support."""

    def __init__(self, *args, eval_examples=None, post_process_function=None, quant_trainer_args=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
        self.quant_trainer_args = quant_trainer_args
        self.calib_num = 128  # default number of calibration samples

    def get_calib_dataloader(self, calib_dataset=None):
        if calib_dataset is None and self.calib_dataset is None:
            raise ValueError("Trainer: calibration requires an calib_dataset.")
        calib_dataset = calib_dataset if calib_dataset is not None else self.calib_dataset

        calib_dataset = self._remove_unused_columns(calib_dataset, description="Calibration")
        return DataLoader(
            calib_dataset,
            batch_size=self.args.eval_batch_size,
            collate_fn=self.data_collator,
            drop_last=self.args.dataloader_drop_last,
            num_workers=self.args.dataloader_num_workers,
            pin_memory=self.args.dataloader_pin_memory,
            shuffle=True,
        )
    def calibrate(self, calib_dataset=None):
        calib_dataset = self.train_dataset if calib_dataset is None else calib_dataset
        calib_dataloader = self.get_calib_dataloader(calib_dataset)
        model = self.model
        quant_trainer.configure_model(model, self.quant_trainer_args, calib=True)
        model.eval()
        quant_trainer.enable_calibration(model)

        logger.info("***** Running calibration *****")
        logger.info(f"  Num examples = {self.calib_num}")
        logger.info(f"  Batch size = {calib_dataloader.batch_size}")
        for step, inputs in enumerate(calib_dataloader):
            # Prediction step
            loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only=False)
            if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
                break

        quant_trainer.finish_calibration(model, self.quant_trainer_args)
        self.model = model
    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is not None and self.compute_metrics is not None:
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

            self.log(metrics)
        else:
            metrics = {}

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics
    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
def lowercase__ ( self , snake_case__="./" ):
"""simple docstring"""
lowerCAmelCase : List[str] = self.eval_dataset
lowerCAmelCase : Tuple = self.get_eval_dataloader(snake_case__ )
lowerCAmelCase : Union[str, Any] = next(iter(snake_case__ ) )
# saving device - to make it consistent
lowerCAmelCase : Optional[Any] = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
# convert to tuple
lowerCAmelCase : Dict = tuple(v.to(snake_case__ ) for k, v in batch.items() )
logger.info("Converting model to be onnx compatible" )
from pytorch_quantization.nn import TensorQuantizer
lowerCAmelCase : Tuple = True
lowerCAmelCase : Optional[int] = self.model.to(snake_case__ )
model.eval()
model.float()
lowerCAmelCase : Union[str, Any] = model.module if hasattr(snake_case__ , "module" ) else model
quant_trainer.configure_model(snake_case__ , self.quant_trainer_args )
lowerCAmelCase : str = os.path.join(snake_case__ , "model.onnx" )
logger.info(f"""exporting model to {output_model_file}""" )
lowerCAmelCase : Optional[Any] = {0: "batch_size", 1: "seq_len"}
torch.onnx.export(
snake_case__ , snake_case__ , snake_case__ , export_params=snake_case__ , opset_version=13 , do_constant_folding=snake_case__ , input_names=["input_ids", "attention_mask", "token_type_ids"] , output_names=["output_start_logits", "output_end_logits"] , dynamic_axes={
"input_ids": axes,
"attention_mask": axes,
"token_type_ids": axes,
"output_start_logits": axes,
"output_end_logits": axes,
} , verbose=snake_case__ , )
logger.info("onnx export finished" )
| 350
|
"""simple docstring"""
import re
from filelock import FileLock
try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Split `x` into sentences, one per line (needed for ROUGE-Lsum scoring)."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
| 133
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 124
|
def binary_recursive(decimal: int) -> str:
    """Return the binary digits of a non-negative integer, recursively."""
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)


def main(number: str) -> str:
    """Validate `number` and return its binary representation with a 0b prefix."""
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f"{negative}0b{binary_recursive(int(number))}"
if __name__ == "__main__":
from doctest import testmod
testmod()
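# --- Illustrative usage (editor's sketch) ---
#   >>> main("11")
#   '0b1011'
#   >>> main("-7")
#   '-0b111'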
| 18
| 0
|
import gc
import math
import unittest
import torch
from diffusers import UNet2DModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
logger = logging.get_logger(__name__)
enable_full_determinism()
class Unet2DModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": (32, 64),
            "down_block_types": ("DownBlock2D", "AttnDownBlock2D"),
            "up_block_types": ("AttnUpBlock2D", "UpBlock2D"),
            "attention_head_dim": 3,
            "out_channels": 3,
            "in_channels": 3,
            "layers_per_block": 2,
            "sample_size": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
class UNetLDMModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 4
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (4, 32, 32)

    @property
    def output_shape(self):
        return (4, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "sample_size": 32,
            "in_channels": 4,
            "out_channels": 4,
            "layers_per_block": 2,
            "block_out_channels": (32, 64),
            "attention_head_dim": 32,
            "down_block_types": ("DownBlock2D", "DownBlock2D"),
            "up_block_types": ("UpBlock2D", "UpBlock2D"),
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)

        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate(self):
        model, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != "cuda" , "This test is supposed to run on GPU")
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->List[str]:
'''simple docstring'''
lowerCamelCase__: Tuple =UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update" , output_loading_info=UpperCAmelCase_)
model_accelerate.to(UpperCAmelCase_)
model_accelerate.eval()
lowerCamelCase__: List[Any] =torch.randn(
1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0) , )
lowerCamelCase__: List[str] =noise.to(UpperCAmelCase_)
lowerCamelCase__: Dict =torch.tensor([10] * noise.shape[0]).to(UpperCAmelCase_)
lowerCamelCase__: List[str] =model_accelerate(UpperCAmelCase_ , UpperCAmelCase_)["sample"]
# two models don't need to stay in the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
lowerCamelCase__: Optional[int] =UNetaDModel.from_pretrained(
"fusing/unet-ldm-dummy-update" , output_loading_info=UpperCAmelCase_ , low_cpu_mem_usage=UpperCAmelCase_)
model_normal_load.to(UpperCAmelCase_)
model_normal_load.eval()
lowerCamelCase__: Union[str, Any] =model_normal_load(UpperCAmelCase_ , UpperCAmelCase_)["sample"]
assert torch_all_close(UpperCAmelCase_ , UpperCAmelCase_ , rtol=1E-3)
    def test_output_pretrained(self):
        model = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update")
        model.eval()
        model.to(torch_device)

        noise = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-3))
class NCSNppModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [10]).to(dtype=torch.int32, device=torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)
    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64, 64, 64],
            "in_channels": 3,
            "layers_per_block": 1,
            "out_channels": 3,
            "time_embedding_type": "fourier",
            "norm_eps": 1e-6,
            "mid_block_scale_factor": math.sqrt(2.0),
            "norm_num_groups": None,
            "down_block_types": [
                "SkipDownBlock2D",
                "AttnSkipDownBlock2D",
                "SkipDownBlock2D",
                "SkipDownBlock2D",
            ],
            "up_block_types": [
                "SkipUpBlock2D",
                "SkipUpBlock2D",
                "AttnSkipUpBlock2D",
                "SkipUpBlock2D",
            ],
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    @slow
    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        inputs = self.dummy_input
        noise = floats_tensor((4, 3) + (256, 256)).to(torch_device)
        inputs["sample"] = noise
        image = model(**inputs)

        assert image is not None, "Make sure output is not None"
    @slow
    def test_output_pretrained_ve_mid(self):
        model = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (256, 256)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -10980.7129, -20028.8535, 8148.2822, 2342.2905, 567.7608])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
    def test_output_pretrained_ve_large(self):
        model = UNet2DModel.from_pretrained("fusing/ncsnpp-ffhq-ve-dummy-update")
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

    def test_forward_with_norm_groups(self):
        # not required for this model
        pass
| 366
|
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
__A = ["text", "image", "audio"]
def create_inputs(input_types: List[str]):
    inputs = []

    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512))
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f"Invalid type requested: {input_type}")

    return inputs
def output_types(outputs: List):
    output_types = []

    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(f"Invalid output: {output}")

    return output_types
@is_tool_test
class ToolTesterMixin:
    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "outputs"))

        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)

        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]

        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, "description"))
        self.assertTrue(hasattr(self.tool, "default_checkpoint"))
        self.assertTrue(self.tool.description.startswith("This is a tool that"))

    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))

        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_types_inputs(self):
        inputs = create_inputs(self.tool.inputs)

        _inputs = []

        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))

        # Should not raise an error
        outputs = self.tool(*_inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))
| 273
| 0
|
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def get_swin_config(model_name):
    config = SwinConfig(image_size=192)

    if "base" in model_name:
        window_size = 6
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    elif "large" in model_name:
        window_size = 12
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    else:
        raise ValueError("Model not supported, only supports base and large variants")

    config.window_size = window_size
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads

    return config
def rename_key(name):
    if "encoder.mask_token" in name:
        name = name.replace("encoder.mask_token", "embeddings.mask_token")
    if "encoder.patch_embed.proj" in name:
        name = name.replace("encoder.patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "encoder.patch_embed.norm" in name:
        name = name.replace("encoder.patch_embed.norm", "embeddings.norm")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")

    if name == "encoder.norm.weight":
        name = "layernorm.weight"
    if name == "encoder.norm.bias":
        name = "layernorm.bias"

    if "decoder" in name:
        pass
    else:
        name = "swin." + name

    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn_mask" in key:
            pass
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            block_num = int(key_split[4])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_swin_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    config = get_swin_config(model_name)
    model = SwinForMaskedImageModeling(config)
    model.eval()

    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image_processor = ViTImageProcessor(size={"height": 192, "width": 192})
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs).logits

    print(outputs.keys())
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and image processor for {model_name} to hub")
        model.push_to_hub(f"microsoft/{model_name}")
        image_processor.push_to_hub(f"microsoft/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""swin-base-simmim-window6-192""",
type=str,
choices=["""swin-base-simmim-window6-192""", """swin-large-simmim-window12-192"""],
help="""Name of the Swin SimMIM model you'd like to convert.""",
)
parser.add_argument(
"""--checkpoint_path""",
default="""/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth""",
type=str,
help="""Path to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
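# --- Illustrative invocation (editor's sketch) ---
# The script filename and checkpoint path below are placeholders; point
# --checkpoint_path at a local SimMIM .pth file.
#
#   python convert_swin_simmim_to_pytorch.py \
#       --model_name swin-base-simmim-window6-192 \
#       --checkpoint_path /path/to/simmim_pretrain.pth \
#       --pytorch_dump_folder_path ./swin-simmim-converted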
| 248
|
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
    """Pipeline that predicts a caption for a given image."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING
        )

    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        forward_kwargs = {}
        preprocess_params = {}

        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one"
                )
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image, prompt=None):
        image = load_image(image)

        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation."
                )

            model_type = self.model.config.model_type

            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)
            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")
        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)

        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None

        return model_inputs

    def _forward(self, model_inputs, generate_kwargs=None):
        # Git model sets `input_ids = None` in `preprocess` when `prompt` is None; in batched
        # mode the pipeline collates them into a list of `None`, which breaks `generate`.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None

        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs

    def postprocess(self, model_outputs):
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids,
                    skip_special_tokens=True,
                )
            }
            records.append(record)
        return records
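# --- Illustrative usage (editor's sketch) ---
# "ydshieh/vit-gpt2-coco-en" is a public image-captioning checkpoint; treat the
# exact output text as an assumption.
#
#   from transformers import pipeline
#   captioner = pipeline("image-to-text", model="ydshieh/vit-gpt2-coco-en")
#   captioner("http://images.cocodataset.org/val2017/000000039769.jpg")
#   # => [{'generated_text': 'a cat laying on a blanket ...'}]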
| 227
| 0
|
'''simple docstring'''
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image: Image) -> str:
    """Return a stable md5 fingerprint of an image's raw bytes."""
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class DepthEstimationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        depth_estimator = DepthEstimationPipeline(model=model, image_processor=processor)
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, depth_estimator, examples):
        outputs = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png")
        self.assertEqual({"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, outputs)
        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
        outputs = depth_estimator(
            [
                Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                # RGBA
                dataset[0]["file"],
                # LA
                dataset[1]["file"],
                # L
                dataset[2]["file"],
            ]
        )
        self.assertEqual(
            [
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
            ],
            outputs,
        )
    @require_tf
    @unittest.skip("Depth estimation is not implemented in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        model_id = "Intel/dpt-large"
        depth_estimator = pipeline("depth-estimation", model=model_id)
        outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
        outputs["depth"] = hashimage(outputs["depth"])

        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item()), 29.304)
        self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item()), 2.662)

    @require_torch
    def test_small_model_pt(self):
        self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT")
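# --- Illustrative usage (editor's sketch) ---
# Mirrors the slow test above; "Intel/dpt-large" is the checkpoint the test itself uses.
#
#   from transformers import pipeline
#   depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#   result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
#   result["depth"]            # a PIL.Image depth map
#   result["predicted_depth"]  # the raw torch.Tensor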
| 299
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_mmbt": ["MMBTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mmbt"] = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 299
| 1
|
'''simple docstring'''
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
    UNet2DModel,
)
TEST_UNET_CONFIG = {
'sample_size': 32,
'in_channels': 3,
'out_channels': 3,
'layers_per_block': 2,
'num_class_embeds': 1_000,
'block_out_channels': [32, 64],
'attention_head_dim': 8,
'down_block_types': [
'ResnetDownsampleBlock2D',
'AttnDownBlock2D',
],
'up_block_types': [
'AttnUpBlock2D',
'ResnetUpsampleBlock2D',
],
'resnet_time_scale_shift': 'scale_shift',
'upsample_type': 'resnet',
'downsample_type': 'resnet',
}
IMAGENET_64_UNET_CONFIG = {
'sample_size': 64,
'in_channels': 3,
'out_channels': 3,
'layers_per_block': 3,
'num_class_embeds': 1_000,
'block_out_channels': [192, 192 * 2, 192 * 3, 192 * 4],
'attention_head_dim': 64,
'down_block_types': [
'ResnetDownsampleBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
],
'up_block_types': [
'AttnUpBlock2D',
'AttnUpBlock2D',
'AttnUpBlock2D',
'ResnetUpsampleBlock2D',
],
'resnet_time_scale_shift': 'scale_shift',
'upsample_type': 'resnet',
'downsample_type': 'resnet',
}
LSUN_256_UNET_CONFIG = {
'sample_size': 256,
'in_channels': 3,
'out_channels': 3,
'layers_per_block': 2,
'num_class_embeds': None,
'block_out_channels': [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
'attention_head_dim': 64,
'down_block_types': [
'ResnetDownsampleBlock2D',
'ResnetDownsampleBlock2D',
'ResnetDownsampleBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
],
'up_block_types': [
'AttnUpBlock2D',
'AttnUpBlock2D',
'AttnUpBlock2D',
'ResnetUpsampleBlock2D',
'ResnetUpsampleBlock2D',
'ResnetUpsampleBlock2D',
],
'resnet_time_scale_shift': 'default',
'upsample_type': 'resnet',
'downsample_type': 'resnet',
}
CD_SCHEDULER_CONFIG = {
'num_train_timesteps': 40,
'sigma_min': 0.0_02,
'sigma_max': 80.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
'num_train_timesteps': 201,
'sigma_min': 0.0_02,
'sigma_max': 80.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
'num_train_timesteps': 151,
'sigma_min': 0.0_02,
'sigma_max': 80.0,
}
def strabool(v) -> bool:
    """Parse a boolean-ish command line value."""
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]

    if has_skip:
        new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
        new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]

    return new_checkpoint
def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_dim=None):
    weight_q, weight_k, weight_v = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0)

    new_checkpoint[f"{new_prefix}.group_norm.weight"] = checkpoint[f"{old_prefix}.norm.weight"]
    new_checkpoint[f"{new_prefix}.group_norm.bias"] = checkpoint[f"{old_prefix}.norm.bias"]

    new_checkpoint[f"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1).squeeze(-1)

    new_checkpoint[f"{new_prefix}.to_out.0.weight"] = (
        checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f"{new_prefix}.to_out.0.bias"] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1).squeeze(-1)

    return new_checkpoint
def con_pt_to_diffuser(checkpoint_path: str, unet_config):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint["label_emb.weight"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    down_block_types = unet_config["down_block_types"]
    layers_per_block = unet_config["layers_per_block"]
    attention_head_dim = unet_config["attention_head_dim"]
    channels_list = unet_config["block_out_channels"]
    current_layer = 1
    prev_channels = channels_list[0]

    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f"down_blocks.{i}.attentions.{j}"
                old_prefix = f"input_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

        if i != len(down_block_types) - 1:
            new_prefix = f"down_blocks.{i}.downsamplers.0"
            old_prefix = f"input_blocks.{current_layer}.0"
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1

        prev_channels = current_channels

    # hardcoded the mid-block for now
    new_prefix = "mid_block.resnets.0"
    old_prefix = "middle_block.0"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = "mid_block.attentions.0"
    old_prefix = "middle_block.1"
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = "mid_block.resnets.1"
    old_prefix = "middle_block.2"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    current_layer = 0
    up_block_types = unet_config["up_block_types"]

    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1

            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.1"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f"up_blocks.{i}.attentions.{j}"
                old_prefix = f"output_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.2"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--unet_path', default=None, type=str, required=True, help='Path to the unet.pt to convert.')
parser.add_argument(
'--dump_path', default=None, type=str, required=True, help='Path to output the converted UNet model.'
)
parser.add_argument('--class_cond', default=True, type=str, help='Whether the model is class-conditional.')
    args = parser.parse_args()

    args.class_cond = strabool(args.class_cond)

    ckpt_name = os.path.basename(args.unet_path)
print(f'''Checkpoint: {ckpt_name}''')
# Get U-Net config
if "imagenet64" in ckpt_name:
        unet_config = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        unet_config = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
        unet_config = TEST_UNET_CONFIG
else:
raise ValueError(f'''Checkpoint type {ckpt_name} is not currently supported.''')
if not args.class_cond:
        unet_config["num_class_embeds"] = None

    converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)
    image_unet = UNet2DModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
        scheduler_config = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
        scheduler_config = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        scheduler_config = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(f'''Checkpoint type {ckpt_name} is not currently supported.''')
    cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config)
    consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
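# --- Illustrative invocation (editor's sketch) ---
# The script and checkpoint filenames are placeholders; the script infers the
# config from the checkpoint name, so it must contain "imagenet64",
# "256" + "bedroom"/"cat", or "test".
#
#   python convert_consistency_to_diffusers.py \
#       --unet_path ./cd_imagenet64_l2.pt \
#       --dump_path ./consistency-model-imagenet64 \
#       --class_cond True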
| 304
|
'''simple docstring'''
from __future__ import annotations
def encode(plain: str) -> list[int]:
    """Map each lowercase letter to its position in the alphabet (a=1 ... z=26)."""
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    """Inverse of `encode`: map positions back to lowercase letters."""
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))
if __name__ == "__main__":
main()
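# --- Illustrative usage (editor's sketch) ---
#   >>> encode("abc")
#   [1, 2, 3]
#   >>> decode([8, 9])
#   'hi'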
| 304
| 1
|
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class LDMPipeline(DiffusionPipeline):
    """Unconditional latent-diffusion pipeline: denoise in latent space, then decode with a VQ-VAE."""

    def __init__(self, vqvae, unet, scheduler):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size=1,
        generator=None,
        eta=0.0,
        num_inference_steps=50,
        output_type="pil",
        return_dict=True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())

        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(self.scheduler.timesteps):
            latent_model_input = self.scheduler.scale_model_input(latents, t)
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VAE
        image = self.vqvae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
from transformers.models.t5.modeling_flax_t5 import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()

        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
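# Note: `loss` above is the mean per-token cross-entropy, so multiplying by the
# target length and negating converts it into the sequence log-likelihood score
# that the original mesh-tensorflow (mtf) T5 evaluation reports.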
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
logger = logging.get_logger(__name__)


STOPPING_CRITERIA_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
            Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
            or scores for each vocabulary token after SoftMax.
        kwargs (`Dict[str, Any]`, *optional*):
            Additional stopping criteria specific kwargs.

    Return:
        `bool`. `False` indicates we should continue, `True` indicates we should stop.

"""
class StoppingCriteria(ABC):
    """Abstract base class for all stopping criteria that can be applied during generation."""

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        raise NotImplementedError("StoppingCriteria needs to be subclassed")


class MaxLengthCriteria(StoppingCriteria):
    """Stops generation once the full sequence reaches `max_length` tokens."""

    def __init__(self, max_length: int, max_position_embeddings: Optional[int] = None):
        self.max_length = max_length
        self.max_position_embeddings = max_position_embeddings

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        cur_len = input_ids.shape[-1]
        is_done = cur_len >= self.max_length
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                "This is a friendly reminder - the current text generation call will exceed the model's predefined "
                f"maximum length ({self.max_position_embeddings}). Depending on the model, you may observe "
                "exceptions, performance degradation, or nothing at all."
            )
        return is_done


class MaxNewTokensCriteria(StoppingCriteria):
    """Deprecated: stops once `start_length + max_new_tokens` tokens were generated."""

    def __init__(self, start_length: int, max_new_tokens: int):
        warnings.warn(
            "The class `MaxNewTokensCriteria` is deprecated. "
            f"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` "
            "with `max_length = start_length + max_new_tokens` instead.",
            FutureWarning,
        )
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        self.max_length = start_length + max_new_tokens

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return input_ids.shape[-1] >= self.max_length


class MaxTimeCriteria(StoppingCriteria):
    """Stops generation once `max_time` seconds have elapsed."""

    def __init__(self, max_time: float, initial_timestamp: Optional[float] = None):
        self.max_time = max_time
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return time.time() - self.initial_timestamp > self.max_time


class StoppingCriteriaList(list):
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return any(criteria(input_ids, scores) for criteria in self)

    @property
    def max_length(self) -> Optional[int]:
        for stopping_criterium in self:
            if isinstance(stopping_criterium, MaxLengthCriteria):
                return stopping_criterium.max_length
            elif isinstance(stopping_criterium, MaxNewTokensCriteria):
                return stopping_criterium.max_length
        return None


def validate_stopping_criteria(stopping_criteria: StoppingCriteriaList, max_length: int) -> StoppingCriteriaList:
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria)
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter", UserWarning)
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length))
    return new_stopping_criteria
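# Usage sketch (model and input_ids assumed to exist): criteria compose through
# StoppingCriteriaList and are passed to `generate` via `stopping_criteria`.
#
#   criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20), MaxTimeCriteria(max_time=5.0)])
#   outputs = model.generate(input_ids, stopping_criteria=criteria)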
def exchange_sort(numbers: list[int]) -> list[int]:
    """Sort in place by repeatedly swapping out-of-order pairs (O(n^2) comparisons)."""
    numbers_length = len(numbers)
    for i in range(numbers_length):
        for j in range(i + 1, numbers_length):
            if numbers[j] < numbers[i]:
                numbers[j], numbers[i] = numbers[i], numbers[j]
    return numbers


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(exchange_sort(unsorted))
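# Example: exchange_sort([5, 3, 1]) -> [1, 3, 5]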
'''simple docstring'''
from scipy.stats import pearsonr
import datasets
_DESCRIPTION = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""


_KWARGS_DESCRIPTION = """
Args:
    predictions (`list` of `int`): Predicted class labels, as returned by a model.
    references (`list` of `int`): Ground truth labels.
    return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.

Returns:
    pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.

Examples:

    Example 1-A simple example using only predictions and references.
        >>> pearsonr_metric = datasets.load_metric("pearsonr")
        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
        >>> print(round(results['pearsonr'], 2))
        -0.74

    Example 2-The same as Example 1, but that also returns the `p-value`.
        >>> pearsonr_metric = datasets.load_metric("pearsonr")
        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
        >>> print(sorted(list(results.keys())))
        ['p-value', 'pearsonr']
        >>> print(round(results['pearsonr'], 2))
        -0.74
        >>> print(round(results['p-value'], 2))
        0.15
"""


_CITATION = r"""
@article{2020SciPy-NMeth,
author  = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
          Haberland, Matt and Reddy, Tyler and Cournapeau, David and
          Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
          Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
          Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
          Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
          Kern, Robert and Larson, Eric and Carey, C J and
          Polat, Ilhan and Feng, Yu and Moore, Eric W. and
          {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
          Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
          Harris, Charles R. and Archibald, Anne M. and
          Ribeiro, Antonio H. and Pedregosa, Fabian and
          {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title   = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
          Computing in Python}},
journal = {Nature Methods},
year    = {2020},
volume  = {17},
pages   = {261--272},
adsurl  = {https://rdcu.be/b08Wh},
doi     = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Pearsonr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
a_ = logging.get_logger(__name__)
class ImageProcessor(BaseImageProcessor):
    # Placeholder name: the processor's original class name is not recoverable from this file.
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
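# Usage sketch (the class name is the placeholder introduced above; values follow
# its defaults): a single RGB image becomes a batched `pixel_values` tensor.
#
#   import numpy as np
#   processor = ImageProcessor()
#   image = np.zeros((512, 512, 3), dtype=np.uint8)
#   pixel_values = processor(images=image, return_tensors="pt")["pixel_values"]
#   # pixel_values.shape -> torch.Size([1, 3, 224, 224])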
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> int:
'''simple docstring'''
__snake_case : int = tempfile.mkdtemp()
__snake_case : Dict = BlipImageProcessor()
__snake_case : Optional[Any] = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel" )
__snake_case : Union[str, Any] = BlipProcessor(_A , _A )
processor.save_pretrained(self.tmpdirname )
def UpperCAmelCase ( self , **UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **_A ).tokenizer
def UpperCAmelCase ( self , **UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **_A ).image_processor
def UpperCAmelCase ( self ) -> int:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase ( self ) -> int:
'''simple docstring'''
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
def UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
__snake_case : str = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__snake_case : Tuple = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
__snake_case : Optional[int] = self.get_image_processor(do_normalize=_A , padding_value=1.0 )
__snake_case : Union[str, Any] = BlipProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=_A , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _A )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _A )
def UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
__snake_case : List[str] = self.get_image_processor()
__snake_case : int = self.get_tokenizer()
__snake_case : Tuple = BlipProcessor(tokenizer=_A , image_processor=_A )
__snake_case : Optional[Any] = self.prepare_image_inputs()
__snake_case : int = image_processor(_A , return_tensors="np" )
__snake_case : List[Any] = processor(images=_A , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
__snake_case : Optional[int] = self.get_image_processor()
__snake_case : Optional[Any] = self.get_tokenizer()
__snake_case : Any = BlipProcessor(tokenizer=_A , image_processor=_A )
__snake_case : Optional[int] = "lower newer"
__snake_case : Tuple = processor(text=_A )
__snake_case : Optional[Any] = tokenizer(_A , return_token_type_ids=_A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
__snake_case : Tuple = self.get_image_processor()
__snake_case : str = self.get_tokenizer()
__snake_case : Any = BlipProcessor(tokenizer=_A , image_processor=_A )
__snake_case : Tuple = "lower newer"
__snake_case : Union[str, Any] = self.prepare_image_inputs()
__snake_case : Union[str, Any] = processor(text=_A , images=_A )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
# test if it raises when no input is passed
with pytest.raises(_A ):
processor()
def UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
__snake_case : Tuple = self.get_image_processor()
__snake_case : Tuple = self.get_tokenizer()
__snake_case : str = BlipProcessor(tokenizer=_A , image_processor=_A )
__snake_case : str = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__snake_case : Union[str, Any] = processor.batch_decode(_A )
__snake_case : Any = tokenizer.batch_decode(_A )
self.assertListEqual(_A , _A )
def UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
__snake_case : Union[str, Any] = self.get_image_processor()
__snake_case : Optional[int] = self.get_tokenizer()
__snake_case : Tuple = BlipProcessor(tokenizer=_A , image_processor=_A )
__snake_case : str = "lower newer"
__snake_case : Any = self.prepare_image_inputs()
__snake_case : int = processor(text=_A , images=_A )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
MOE_LAYER_NAME_MAPPING = {
"/attention/": "/0/SelfAttention/",
"/self_attention/": "/0/SelfAttention/",
"/encoder_decoder_attention/": "/1/EncDecAttention/",
"value": "v",
"query": "q",
"key": "k",
"out": "o",
"pre_self_attention_layer_norm": "0/layer_norm",
"pre_cross_attention_layer_norm": "1/layer_norm",
"pre_attention_layer_norm": "0/layer_norm", # previously 1, but seems wrong
"token_embedder": "shared",
"encoder_norm": "final_layer_norm",
"decoder_norm": "final_layer_norm",
"relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight",
"router/router_weights/w/": "router/classifier/",
"roer/roer_weights/w/": "router/classifier/",
"logits_dense": "lm_head",
}
def rename_keys(s_dict):
    # 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in the original model
    keys = list(s_dict.keys())
    for key in keys:
        layer_to_block_of_layer = r".*/layers_(\d+)"
        new_key = key
        if re.match(layer_to_block_of_layer, key):
            new_key = re.sub(r"layers_(\d+)", r"block/\1/layer", new_key)

        layer_to_block_of_layer = r"(encoder|decoder)\/"
        if re.match(layer_to_block_of_layer, key):
            groups = re.match(layer_to_block_of_layer, new_key).groups()
            if groups[0] == "encoder":
                new_key = re.sub(r"/mlp/", r"/1/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/1/layer_norm/", new_key)
            elif groups[0] == "decoder":
                new_key = re.sub(r"/mlp/", r"/2/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/2/layer_norm/", new_key)

        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key, temp_key)

        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)

    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T

    # 3. Take extra care of the EXPERTS layer
    for key in list(s_dict.keys()):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts):
                s_dict[key.replace("expert/", f"experts/expert_{idx}/")] = expert_weights[idx]
                print(f"{key} -> {key.replace('expert/', f'experts/expert_{idx}/')}")
            s_dict.pop(key)

    return s_dict
GIN_TO_CONFIG_MAPPING = {
"NUM_ENCODER_LAYERS": "num_layers",
"NUM_DECODER_LAYERS": "num_decoder_layers",
"NUM_HEADS": "num_heads",
"HEAD_DIM": "d_kv",
"EMBED_DIM": "d_model",
"MLP_DIM": "d_ff",
"NUM_SELECTED_EXPERTS": "num_selected_experts",
"NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers",
"NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers",
"dense.MlpBlock.activations": "feed_forward_proj",
}
def convert_gin_to_config(gin_file, num_experts):
    # Convert a gin config file to a SwitchTransformersConfig.
    import regex as re

    with open(gin_file, "r") as f:
        raw_gin = f.read()

    regex_match = re.findall(r"(.*) = ([0-9.]*)", raw_gin)
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if "." in value else int(value)

    activation = re.findall(r"(.*activations) = \(\'(.*)\',\)", raw_gin)[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])

    args["num_experts"] = num_experts
    config = SwitchTransformersConfig(**args)
    return config
def convert_flax_checkpoint_to_pytorch(
    flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8
):
    print(f"Loading flax weights from : {flax_checkpoint_path}")
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path)

    if gin_file is not None:
        config = convert_gin_to_config(gin_file, num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file)

    pt_model = SwitchTransformersForConditionalGeneration(config)

    flax_params = flax_params["target"]
    flax_params = flatten_dict(flax_params, sep="/")
    flax_params = rename_keys(flax_params)
    flax_params = unflatten_dict(flax_params, sep="/")

    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model, flax_params)

    print(f"Save PyTorch model to {pytorch_dump_path}")
    pt_model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"
" model architecture. If not provided, a `gin_file` has to be provided."
),
)
parser.add_argument(
"--gin_file",
default=None,
type=str,
required=False,
help="Path to the gin config file. If not provided, a `config_file` has to be passed ",
)
parser.add_argument(
"--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model."
)
parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts")
    args = parser.parse_args()
    convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
        args.config_name,
        args.gin_file,
        args.pytorch_dump_folder_path,
        args.num_experts,
    )
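    # Example invocation (all paths are placeholders):
    #   python convert_switch_transformers_original_flax_checkpoint_to_pytorch.py \
    #       --switch_t5x_checkpoint_path /path/to/t5x/checkpoint \
    #       --gin_file /path/to/config.gin \
    #       --pytorch_dump_folder_path ./switch-converted \
    #       --num_experts 8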
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"
def split_text(text: str, n=100, character=" ") -> List[str]:
    """Split the text every ``n``-th occurrence of ``character``"""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]


def split_documents(documents: dict) -> dict:
    """Split documents into passages"""
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}


def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute the DPR embeddings of document passages"""
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
    )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}


def main(
    rag_example_args: "RagExampleArguments",
    processing_args: "ProcessingArguments",
    index_hnsw_args: "IndexHnswArguments",
):
    ######################################
    logger.info("Step 1 - Create the dataset")
    ######################################

    # The dataset needed for RAG must have three columns:
    # - title (string): title of the document
    # - text (string): text of a passage of the document
    # - embeddings (array of dimension d): DPR representation of the passage

    # Let's say you have documents in tab-separated csv files with columns "title" and "text"
    assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"

    # You can load a Dataset object this way
    dataset = load_dataset(
        "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"]
    )
    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files

    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)

    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}
    )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer),
        batched=True,
        batch_size=processing_args.batch_size,
        features=new_features,
    )

    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset")
    dataset.save_to_disk(passages_path)
    # from datasets import load_from_disk
    # dataset = load_from_disk(passages_path)  # to reload the dataset

    ######################################
    logger.info("Step 2 - Index the dataset")
    ######################################

    # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=index)

    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss")
    dataset.get_index("embeddings").save(index_path)
    # dataset.load_faiss_index("embeddings", index_path)  # to reload the index
@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    question: Optional[str] = field(
        default=None,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )


@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        },
    )
    batch_size: int = field(
        default=16,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        },
    )


@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
    )
    m: int = field(
        default=128,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        },
    )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
        main(rag_example_args, processing_args, index_hnsw_args)
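# Sketch of how the saved artifacts are consumed (the RagRetriever API is real;
# the paths are the ones produced by `main` above, shown as placeholders):
#
#   from transformers import RagRetriever
#   retriever = RagRetriever.from_pretrained(
#       "facebook/rag-sequence-nq",
#       index_name="custom",
#       passages_path="<output_dir>/my_knowledge_dataset",
#       index_path="<output_dir>/my_knowledge_dataset_hnsw_index.faiss",
#   )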
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_van"] = [
'''VAN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''VanForImageClassification''',
'''VanModel''',
'''VanPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpt2 import GPT2Tokenizer


class TFGPT2Tokenizer(tf.keras.layers.Layer):
    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs):
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
        tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        return cls(**config)

    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length: int = None):
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)

        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length

            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id
                )

        return {"attention_mask": attention_mask, "input_ids": input_ids}
from __future__ import annotations
__UpperCAmelCase = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def search(grid, init, goal, cost, heuristic):
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    x2 = x + DIRECTIONS[i][0]
                    y2 = y + DIRECTIONS[i][1]
                    if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]):
                        if closed[x2][y2] == 0 and grid[x2][y2] == 0:
                            g2 = g + cost
                            f2 = g2 + heuristic[x2][y2]
                            cell.append([f2, g2, x2, y2])
                            closed[x2][y2] = 1
                            action[x2][y2] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        x2 = x - DIRECTIONS[action[x][y]][0]
        y2 = y - DIRECTIONS[action[x][y]][1]
        x = x2
        y = y2
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action
if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]

    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99

    path, action = search(grid, init, goal, cost, heuristic)

    print("ACTION MAP")
    for i in range(len(action)):
        print(action[i])

    for i in range(len(path)):
        print(path[i])
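# Note: `search` is A*-style best-first search on a 4-connected grid; the
# heuristic built above is the Manhattan distance to the goal, with a large
# penalty (99) on obstacle cells so the expansion steers around them.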
from __future__ import annotations


def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    """Greedy fractional knapsack: take items in decreasing value/weight ratio."""
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions


if __name__ == "__main__":
    import doctest

    doctest.testmod()
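# Worked example: values [60, 100, 120], weights [10, 20, 30], capacity 50.
# Ratios are 6, 5 and 4, so items 0 and 1 are taken whole and 20/30 of item 2:
#   fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
#   -> (240.0, [1, 1, 0.6666666666666666])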
def or_gate(input_1: int, input_2: int) -> int:
    """Logical OR: returns 1 if at least one input is 1."""
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1


if __name__ == "__main__":
    print(or_gate(0, 1))
    print(or_gate(1, 0))
    print(or_gate(0, 0))
    print(or_gate(1, 1))
"""simple docstring"""
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
    """Few-shot NER span scorer: scores query tokens against support start/end tokens."""

    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super(FSNERModel, self).__init__()

        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
import numpy as np
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
__A : int = imread(R'''digital_image_processing/image_data/lena_small.jpg''')
__A : Tuple = cvtColor(img, COLOR_BGR2GRAY)
def SCREAMING_SNAKE_CASE__ ( ) -> str:
'''simple docstring'''
lowerCAmelCase : List[Any] = cn.convert_to_negative(_UpperCAmelCase )
# assert negative_img array for at least one True
assert negative_img.any()
def SCREAMING_SNAKE_CASE__ ( ) -> Optional[Any]:
'''simple docstring'''
with Image.open('digital_image_processing/image_data/lena_small.jpg' ) as img:
# Work around assertion for response
assert str(cc.change_contrast(_UpperCAmelCase, 110 ) ).startswith(
'<PIL.Image.Image image mode=RGB size=100x100 at' )
def SCREAMING_SNAKE_CASE__ ( ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase : List[Any] = canny.gen_gaussian_kernel(9, sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def SCREAMING_SNAKE_CASE__ ( ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase : Any = imread('digital_image_processing/image_data/lena_small.jpg', 0 )
# assert ambiguous array for all == True
assert canny_img.all()
lowerCAmelCase : Dict = canny.canny(_UpperCAmelCase )
# assert canny array for at least one True
assert canny_array.any()
def SCREAMING_SNAKE_CASE__ ( ) -> int:
'''simple docstring'''
assert gg.gaussian_filter(_UpperCAmelCase, 5, sigma=0.9 ).all()
def SCREAMING_SNAKE_CASE__ ( ) -> List[str]:
'''simple docstring'''
lowerCAmelCase : Any = array([[0.2_5, 0.5, 0.2_5], [0.5, -3, 0.5], [0.2_5, 0.5, 0.2_5]] )
lowerCAmelCase : List[Any] = conv.img_convolve(_UpperCAmelCase, _UpperCAmelCase ).astype(_UpperCAmelCase )
assert res.any()
def SCREAMING_SNAKE_CASE__ ( ) -> List[Any]:
'''simple docstring'''
assert med.median_filter(_UpperCAmelCase, 3 ).any()
def SCREAMING_SNAKE_CASE__ ( ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase , lowerCAmelCase : Any = sob.sobel_filter(_UpperCAmelCase )
assert grad.any() and theta.any()
def SCREAMING_SNAKE_CASE__ ( ) -> Dict:
'''simple docstring'''
lowerCAmelCase : List[Any] = sp.make_sepia(_UpperCAmelCase, 20 )
assert sepia.all()
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase = "digital_image_processing/image_data/lena_small.jpg" ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase : Optional[Any] = bs.Burkes(imread(_UpperCAmelCase, 1 ), 120 )
burkes.process()
assert burkes.output_img.any()
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase = "digital_image_processing/image_data/lena_small.jpg", ) -> str:
'''simple docstring'''
lowerCAmelCase : int = rs.NearestNeighbour(imread(_UpperCAmelCase, 1 ), 400, 200 )
nn.process()
assert nn.output.any()
def SCREAMING_SNAKE_CASE__ ( ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase : Dict = 'digital_image_processing/image_data/lena.jpg'
# Reading the image and converting it to grayscale.
lowerCAmelCase : Dict = imread(_UpperCAmelCase, 0 )
# Test for get_neighbors_pixel function() return not None
lowerCAmelCase : Any = 0
lowerCAmelCase : str = 0
lowerCAmelCase : List[Any] = image[x_coordinate][y_coordinate]
lowerCAmelCase : List[Any] = lbp.get_neighbors_pixel(
_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
lowerCAmelCase : str = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0, image.shape[0] ):
for j in range(0, image.shape[1] ):
lowerCAmelCase : Tuple = lbp.local_binary_value(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
assert lbp_image.any()
'''simple docstring'''
import torch
from diffusers import DiffusionPipeline
class DummyPipeline(DiffusionPipeline):
    # Placeholder name; the original class name is not recoverable from this file.
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    def __call__(self):
        image = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
        )
        timestep = 1

        model_output = self.unet(image, timestep).sample
        scheduler_output = self.scheduler.step(model_output, timestep, image).prev_sample

        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output)

        return result
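# Note: `scheduler_output - scheduler_output` cancels exactly, so this pipeline
# always returns `torch.ones_like(scheduler_output)`; it is a test stub rather
# than a real sampler.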
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class LDMPipeline(DiffusionPipeline):
    def __init__(self, vqvae: VQModel, unet: UNet2DModel, scheduler: DDIMScheduler) -> None:
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())

        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(self.scheduler.timesteps):
            latent_model_input = self.scheduler.scale_model_input(latents, t)
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VAE
        image = self.vqvae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
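# Usage sketch (real Hub id for the unconditional CelebA-HQ latent diffusion
# checkpoint):
#
#   from diffusers import LDMPipeline
#   pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
#   image = pipe(num_inference_steps=50).images[0]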
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import T5FilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

TARGET_FEATURE_LENGTH = 256
class SpectrogramDiffusionPipeline(DiffusionPipeline):
    _optional_components = ["melgan"]

    def __init__(
        self,
        notes_encoder: SpectrogramNotesEncoder,
        continuous_encoder: SpectrogramContEncoder,
        decoder: T5FilmDecoder,
        scheduler: DDPMScheduler,
        melgan: OnnxRuntimeModel if is_onnx_available() else Any,
    ) -> None:
        super().__init__()

        # From MELGAN
        self.min_value = math.log(1e-5)  # Matches MelGAN training.
        self.max_value = 4.0  # Largest value for most examples
        self.n_dims = 128

        self.register_modules(
            notes_encoder=notes_encoder,
            continuous_encoder=continuous_encoder,
            decoder=decoder,
            scheduler=scheduler,
            melgan=melgan,
        )

    def scale_features(self, features, output_range=(-1.0, 1.0), clip=False):
        """Linearly scale features to the network's output range."""
        min_out, max_out = output_range
        if clip:
            features = torch.clip(features, self.min_value, self.max_value)
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out

    def scale_to_features(self, outputs, input_range=(-1.0, 1.0), clip=False):
        """Invert by linearly scaling network outputs back to the features range."""
        min_out, max_out = input_range
        outputs = torch.clip(outputs, min_out, max_out) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value

    def encode(self, input_tokens, continuous_inputs, continuous_mask):
        tokens_mask = input_tokens > 0
        tokens_encoded, tokens_mask = self.notes_encoder(
            encoder_input_tokens=input_tokens, encoder_inputs_mask=tokens_mask
        )
        continuous_encoded, continuous_mask = self.continuous_encoder(
            encoder_inputs=continuous_inputs, encoder_inputs_mask=continuous_mask
        )
        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]

    def decode(self, encodings_and_masks, input_tokens, noise_time):
        timesteps = noise_time
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=input_tokens.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(input_tokens.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(input_tokens.shape[0], dtype=timesteps.dtype, device=timesteps.device)

        logits = self.decoder(
            encodings_and_masks=encodings_and_masks, decoder_input_tokens=input_tokens, decoder_noise_time=timesteps
        )
        return logits

    @torch.no_grad()
    def __call__(
        self,
        input_tokens: List[List[int]],
        generator: Optional[torch.Generator] = None,
        num_inference_steps: int = 100,
        return_dict: bool = True,
        output_type: str = "numpy",
        callback: Optional[Callable[[int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ) -> Union[AudioPipelineOutput, Tuple]:
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        pred_mel = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims], dtype=np.float32)
        full_pred_mel = np.zeros([1, 0, self.n_dims], np.float32)
        ones = torch.ones((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)

        for i, encoder_input_tokens in enumerate(input_tokens):
            if i == 0:
                encoder_continuous_inputs = torch.from_numpy(pred_mel[:1].copy()).to(
                    device=self.device, dtype=self.decoder.dtype
                )
                # The first chunk has no previous context.
                encoder_continuous_mask = torch.zeros((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                encoder_continuous_mask = ones

            encoder_continuous_inputs = self.scale_features(
                encoder_continuous_inputs, output_range=[-1.0, 1.0], clip=True
            )

            encodings_and_masks = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens]).to(device=self.device),
                continuous_inputs=encoder_continuous_inputs,
                continuous_mask=encoder_continuous_mask,
            )

            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            x = randn_tensor(
                shape=encoder_continuous_inputs.shape,
                generator=generator,
                device=self.device,
                dtype=self.decoder.dtype,
            )

            # set step values
            self.scheduler.set_timesteps(num_inference_steps)

            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
                output = self.decode(
                    encodings_and_masks=encodings_and_masks,
                    input_tokens=x,
                    noise_time=t / self.scheduler.config.num_train_timesteps,
                )

                # Compute previous output: x_t -> x_t-1
                x = self.scheduler.step(output, t, x, generator=generator).prev_sample

            mel = self.scale_to_features(x, input_range=[-1.0, 1.0])
            encoder_continuous_inputs = mel[:1]
            pred_mel = mel.cpu().float().numpy()

            full_pred_mel = np.concatenate([full_pred_mel, pred_mel[:1]], axis=1)

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, full_pred_mel)

            logger.info("Generated segment", i)

        if output_type == "numpy" and not is_onnx_available():
            raise ValueError(
                "Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'."
            )
        elif output_type == "numpy" and self.melgan is None:
            raise ValueError(
                "Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'."
            )

        if output_type == "numpy":
            output = self.melgan(input_features=full_pred_mel.astype(np.float32))
        else:
            output = full_pred_mel

        if not return_dict:
            return (output,)

        return AudioPipelineOutput(audios=output)
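

# Illustrative usage sketch (not part of the original module). The checkpoint
# name and the note-token input are assumptions for demonstration only:
#
#     pipe = SpectrogramDiffusionPipeline.from_pretrained("google/music-spectrogram-diffusion")
#     # `processed_midi` would be a list of note-token chunks produced by a MIDI processor.
#     output = pipe(processed_midi, num_inference_steps=100, output_type="numpy")
#     audio = output.audios[0]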
| 277
|
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import CvtForImageClassification, CvtModel
    from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor
class CvtConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "embed_dim"))
        self.parent.assertTrue(hasattr(config, "num_heads"))
class CvtModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        embed_dim=[16, 48, 96],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        stride_kv=[2, 2, 2],
        cls_token=[False, False, True],
        attention_drop_rate=[0.0, 0.0, 0.0],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return CvtConfig(
            image_size=self.image_size,
            num_labels=self.num_labels,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            num_heads=self.num_heads,
            patch_sizes=self.patch_sizes,
            patch_padding=self.patch_padding,
            patch_stride=self.patch_stride,
            stride_kv=self.stride_kv,
            depth=self.depth,
            cls_token=self.cls_token,
            attention_drop_rate=self.attention_drop_rate,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = CvtModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth)):
            # standard convolution output-size formula, applied once per stage
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = CvtForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class CvtModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": CvtModel, "image-classification": CvtForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = CvtModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Cvt does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Cvt does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Cvt does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = len(self.model_tester.depth)
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CvtModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class CvtModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.9285, 0.9015, -0.3150]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
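

# Illustrative usage sketch (not part of the original test file), mirroring the
# integration test above; the checkpoint name "microsoft/cvt-13" is an assumption:
#
#     from transformers import AutoImageProcessor, CvtForImageClassification
#     processor = AutoImageProcessor.from_pretrained("microsoft/cvt-13")
#     model = CvtForImageClassification.from_pretrained("microsoft/cvt-13")
#     inputs = processor(images=prepare_img(), return_tensors="pt")
#     logits = model(**inputs).logits
#     print(model.config.id2label[logits.argmax(-1).item()])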
| 222
| 0
|
def base16_encode(data: bytes) -> str:
    # Turn each byte into its two-digit uppercase hex representation.
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            """Base16 encoded data is invalid:
Data does not have an even number of hex digits."""
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            """Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters."""
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
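    # Round-trip illustration of the two helpers above (added for clarity):
    encoded = base16_encode(b"Hello World!")
    assert base16_decode(encoded) == b"Hello World!"
    print(encoded)  # 48656C6C6F20576F726C6421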
| 364
|
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str) -> str:
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    return word_by_signature[signature(my_word)]


data = Path(__file__).parent.joinpath('words.txt').read_text(encoding='utf-8')
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)

if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
    with open('anagrams.txt', 'w') as file:
        file.write('all_anagrams = \n ')
        file.write(pprint.pformat(all_anagrams))
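    # Quick illustration (added for clarity): two words are anagrams exactly when
    # their sorted-letter signatures match, e.g.
    #     signature("dearth") == signature("thread") == "adehrt"
    print(signature("dearth"), signature("thread"))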
| 238
| 0
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
    from PIL import Image

    from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class VisionTextDualEncoderProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.5, 0.5, 0.5],
            "image_std": [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images from random uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with self.assertRaises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
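

# Illustrative usage sketch (not part of the original test file): the processor
# wraps one tokenizer and one image processor behind a single call, e.g.
#
#     processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
#     batch = processor(text=["a photo of a cat"], images=[pil_image], return_tensors="pt")
#     # `batch` now holds input_ids/attention_mask from the tokenizer and
#     # pixel_values from the image processor.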
| 43
|
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
hf_table_format = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow('', '|', '|'),
datarow=DataRow('', '|', '|'),
padding=1,
with_header_hide=None,
)
failed = []
group_info = []

no_error_payload = {'type': 'section', 'text': {'type': 'plain_text', 'text': 'No failed tests! 🤗', 'emoji': True}}

payload = [
    {
        'type': 'header',
        'text': {
            'type': 'plain_text',
            'text': f"""🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results""",
            'emoji': True,
        },
    }
]

total_num_failed = 0
for log in Path().glob('*.log'):
    section_num_failed = 0
    with open(log, 'r') as f:
        for line in f:
            line = json.loads(line)
            if line.get('nodeid', '') != "":
                test = line['nodeid']
                if line.get('duration', None) is not None:
                    duration = f"""{line["duration"]:.4f}"""
                    if line.get('outcome', '') == "failed":
                        section_num_failed += 1
                        failed.append([test, duration, log.name.split('_')[0]])
                        total_num_failed += 1
    group_info.append([str(log), section_num_failed, failed])
    failed = []
    log.unlink()

message = ''
all_filesafailed = []
if total_num_failed > 0:
    for name, num_failed, failed_tests in group_info:
        if num_failed > 0:
            if num_failed == 1:
                message += f"*{name[1:]}: {num_failed} failed test*\n"
            else:
                message += f"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            filesafailed = {}
            for test in failed_tests:
                data = test[0].split('::')
                data[0] = data[0].split('/')[-1]
                if data[0] not in filesafailed:
                    filesafailed[data[0]] = [data[1:]]
                else:
                    filesafailed[data[0]] += [data[1:]]
                failed_table.append(data)

            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
            # Count number of instances in failed_tests
            table = []
            for file in individual_files:
                table.append([file, len(filesafailed[file])])

            failed_table = tabulate(
                table,
                headers=['Test Location', 'Num Failed'],
                tablefmt=hf_table_format,
                stralign='right',
            )
            message += f"\n```\n{failed_table}\n```"
            all_filesafailed.append(filesafailed)
    if len(message) > 30_00:
        err = 'Too many failed tests, please see the full report in the Action results.'
        offset = len(err) + 10
        message = message[: 30_00 - offset] + f"""\n...\n```\n{err}"""
print(f"""### {message}""")
else:
lowercase_ : int = 'No failed tests! 🤗'
print(f"""## {message}""")
payload.append(no_error_payload)
if os.environ.get('TEST_TYPE', '') != "":
from slack_sdk import WebClient
lowercase_ : Union[str, Any] = WebClient(token=os.environ['SLACK_API_TOKEN'])
if message != "No failed tests! 🤗":
        md_report = {
            'type': 'section',
            'text': {
                'type': 'mrkdwn',
                'text': message,
            },
        }
        payload.append(md_report)
        action_button = {
            'type': 'section',
            'text': {
                'type': 'mrkdwn',
                'text': '*For more details:*',
            },
            'accessory': {
                'type': 'button',
                'text': {
                    'type': 'plain_text',
                    'text': 'Check Action results',
                    'emoji': True,
                },
                'url': f"""https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
            },
        }
        payload.append(action_button)
    date_report = {
        'type': 'context',
        'elements': [
            {
                'type': 'plain_text',
                'text': f"""Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}""",
            }
        ],
    }
    payload.append(date_report)
    response = client.chat_postMessage(channel='#accelerate-ci-daily', text=message, blocks=payload)
    ts = response.data['ts']
    for failed_file in all_filesafailed:
        for test_location, test_failures in failed_file.items():
            # Keep only the first instance of the test name
            test_class = ''
            for i, row in enumerate(test_failures):
                if row[0] != test_class:
                    test_class = row[0]
                else:
                    row[0] = ''
            payload = {
                'type': 'section',
                'text': {
                    'type': 'mrkdwn',
                    'text': f"""Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```""",
                },
            }
            client.chat_postMessage(
                channel='#accelerate-ci-daily',
                thread_ts=ts,
                blocks=[payload],
            )
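
# Illustrative note (not part of the original script): the parsing loop above
# assumes pytest was run with a report-log plugin so that each line of `*.log`
# is a JSON record. A consumed record might look like this (shape assumed for
# illustration only):
#
#     {"nodeid": "tests/test_big_modeling.py::BigModelingTester::test_dispatch_model",
#      "duration": 0.0123,
#      "outcome": "failed"}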
| 133
| 0
|
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch
    from torch import nn

    from transformers import (
        MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
        MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        MODEL_MAPPING,
        DeiTForImageClassification,
        DeiTForImageClassificationWithTeacher,
        DeiTForMaskedImageModeling,
        DeiTModel,
    )
    from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import DeiTImageProcessor
class DeiTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2
def __magic_name__ ( self : Any ):
UpperCAmelCase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase : Tuple = None
if self.use_labels:
UpperCAmelCase : str = ids_tensor([self.batch_size], self.type_sequence_label_size )
UpperCAmelCase : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def __magic_name__ ( self : Any ):
return DeiTConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=__A, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, )
def __magic_name__ ( self : Dict, __A : Union[str, Any], __A : Union[str, Any], __A : List[Any] ):
UpperCAmelCase : Union[str, Any] = DeiTModel(config=__A )
model.to(__A )
model.eval()
UpperCAmelCase : Optional[int] = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def __magic_name__ ( self : List[str], __A : List[Any], __A : List[Any], __A : Optional[Any] ):
UpperCAmelCase : Optional[int] = DeiTForMaskedImageModeling(config=__A )
model.to(__A )
model.eval()
UpperCAmelCase : Optional[Any] = model(__A )
self.parent.assertEqual(
result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
UpperCAmelCase : str = 1
UpperCAmelCase : Union[str, Any] = DeiTForMaskedImageModeling(__A )
model.to(__A )
model.eval()
UpperCAmelCase : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase : str = model(__A )
self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size) )
def __magic_name__ ( self : Union[str, Any], __A : Dict, __A : int, __A : Optional[int] ):
UpperCAmelCase : str = self.type_sequence_label_size
UpperCAmelCase : Tuple = DeiTForImageClassification(__A )
model.to(__A )
model.eval()
UpperCAmelCase : Any = model(__A, labels=__A )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase : Optional[Any] = 1
UpperCAmelCase : int = DeiTForImageClassification(__A )
model.to(__A )
model.eval()
UpperCAmelCase : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase : Union[str, Any] = model(__A, labels=__A )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class DeiTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DeiTModel,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            """feature-extraction""": DeiTModel,
            """image-classification""": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)
def __magic_name__ ( self : List[Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''DeiT does not use inputs_embeds''' )
def __magic_name__ ( self : List[str] ):
pass
def __magic_name__ ( self : List[Any] ):
UpperCAmelCase , UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : Optional[Any] = model_class(__A )
self.assertIsInstance(model.get_input_embeddings(), (nn.Module) )
UpperCAmelCase : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__A, nn.Linear ) )
def __magic_name__ ( self : List[str] ):
UpperCAmelCase , UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : List[str] = model_class(__A )
UpperCAmelCase : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase : Optional[int] = [*signature.parameters.keys()]
UpperCAmelCase : str = ['''pixel_values''']
self.assertListEqual(arg_names[:1], __A )
def __magic_name__ ( self : List[str] ):
UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def __magic_name__ ( self : int ):
UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__A )
def __magic_name__ ( self : Union[str, Any] ):
UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__A )
def __magic_name__ ( self : List[str], __A : List[Any], __A : int, __A : Optional[Any]=False ):
UpperCAmelCase : Optional[Any] = super()._prepare_for_class(__A, __A, return_labels=__A )
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def __magic_name__ ( self : List[Any] ):
if not self.model_tester.is_training:
return
UpperCAmelCase , UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : int = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(__A )
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
UpperCAmelCase : Tuple = model_class(__A )
model.to(__A )
model.train()
UpperCAmelCase : List[str] = self._prepare_for_class(__A, __A, return_labels=__A )
UpperCAmelCase : Union[str, Any] = model(**__A ).loss
loss.backward()
def __magic_name__ ( self : Dict ):
UpperCAmelCase , UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
UpperCAmelCase : List[str] = False
UpperCAmelCase : List[Any] = True
for model_class in self.all_model_classes:
if model_class in get_values(__A ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
UpperCAmelCase : Any = model_class(__A )
model.gradient_checkpointing_enable()
model.to(__A )
model.train()
UpperCAmelCase : Dict = self._prepare_for_class(__A, __A, return_labels=__A )
UpperCAmelCase : Tuple = model(**__A ).loss
loss.backward()
def __magic_name__ ( self : Optional[int] ):
UpperCAmelCase , UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Tuple = [
{'''title''': '''multi_label_classification''', '''num_labels''': 2, '''dtype''': torch.float},
{'''title''': '''single_label_classification''', '''num_labels''': 1, '''dtype''': torch.long},
{'''title''': '''regression''', '''num_labels''': 1, '''dtype''': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(__A ),
*get_values(__A ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F'''Testing {model_class} with {problem_type["title"]}''' ):
UpperCAmelCase : Optional[int] = problem_type['''title''']
UpperCAmelCase : Union[str, Any] = problem_type['''num_labels''']
UpperCAmelCase : Optional[Any] = model_class(__A )
model.to(__A )
model.train()
UpperCAmelCase : List[Any] = self._prepare_for_class(__A, __A, return_labels=__A )
if problem_type["num_labels"] > 1:
UpperCAmelCase : int = inputs['''labels'''].unsqueeze(1 ).repeat(1, problem_type['''num_labels'''] )
UpperCAmelCase : Optional[int] = inputs['''labels'''].to(problem_type['''dtype'''] )
# This tests that we do not trigger the warning form PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=__A ) as warning_list:
UpperCAmelCase : List[Any] = model(**__A ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F'''Something is going wrong in the regression problem: intercepted {w.message}''' )
loss.backward()
@slow
def __magic_name__ ( self : int ):
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : Tuple = DeiTModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
    return image
@require_torch
@require_vision
class DeiTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained('''facebook/deit-base-distilled-patch16-224''')
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_image_classification_head(self):
        model = DeiTForImageClassificationWithTeacher.from_pretrained('''facebook/deit-base-distilled-patch16-224''').to(
            torch_device
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='''pt''').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.0266, 0.1912, -1.2861]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@slow
@require_accelerate
@require_torch_gpu
    def test_inference_fp16(self):
        model = DeiTModel.from_pretrained(
            '''facebook/deit-base-distilled-patch16-224''', torch_dtype=torch.float16, device_map='''auto'''
        )
        image_processor = self.default_image_processor

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='''pt''')
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            outputs = model(pixel_values)
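

# Illustrative usage sketch (not part of the original test file), mirroring the
# fp16 integration test above; the checkpoint name comes from the tests:
#
#     model = DeiTModel.from_pretrained(
#         "facebook/deit-base-distilled-patch16-224", torch_dtype=torch.float16, device_map="auto"
#     )
#     processor = DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
#     inputs = processor(images=prepare_img(), return_tensors="pt")
#     with torch.no_grad():
#         outputs = model(inputs.pixel_values.to(model.device))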
| 99
|
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import _compute_mask_indices
if is_apex_available():
    from apex import amp

if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
    _is_native_amp_available = True
    from torch.cuda.amp import autocast

logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"""help""": """Whether to freeze the feature extractor layers of the model."""}
    )
    verbose_logging: Optional[bool] = field(
        default=False, metadata={"""help""": """Whether to log verbose messages or not."""}
    )
    max_gumbel_temperature: Optional[float] = field(
        default=2.0, metadata={"""help""": """Maximum temperature for gumbel softmax."""}
    )
    min_gumbel_temperature: Optional[float] = field(
        default=0.5, metadata={"""help""": """Minimum temperature for gumbel softmax."""}
    )
    gumbel_temperature_decay: Optional[float] = field(
        default=0.999995, metadata={"""help""": """Decay of gumbel temperature during training."""}
    )
def configure_logger(model_args: ModelArguments, training_args: TrainingArguments) -> None:
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''',
        datefmt='''%m/%d/%Y %H:%M:%S''',
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank):
        logging_level = logging.INFO
    logger.setLevel(logging_level)
@dataclass
class DataTrainingArguments:
    dataset_name: str = field(
        default=None, metadata={"""help""": """The name of the dataset to use (via the datasets library)."""}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""}
    )
    train_split_name: Optional[str] = field(
        default="""train""",
        metadata={
            """help""": """The name of the training data set split to use (via the datasets library). Defaults to 'train'"""
        },
    )
    validation_split_name: Optional[str] = field(
        default="""validation""",
        metadata={
            """help""": (
                """The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"""
            )
        },
    )
    speech_file_column: Optional[str] = field(
        default="""file""",
        metadata={"""help""": """Column in the dataset that contains speech file path. Defaults to 'file'"""},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""}
    )
    validation_split_percentage: Optional[int] = field(
        default=1,
        metadata={
            """help""": """The percentage of the train set used as validation set in case there's no validation split"""
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={"""help""": """The number of processes to use for the preprocessing."""}
    )
    max_duration_in_seconds: Optional[float] = field(
        default=20.0, metadata={"""help""": """Filter audio files that are longer than `max_duration_in_seconds` seconds"""}
    )
@dataclass
class DataCollatorForWav2Vec2Pretraining:
    model: Wav2Vec2ForPreTraining
    feature_extractor: Wav2Vec2FeatureExtractor
    padding: Union[bool, str] = "longest"
    pad_to_multiple_of: Optional[int] = None
    max_length: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # reformat list to dict and set to pytorch format
        batch = self.feature_extractor.pad(
            features,
            max_length=self.max_length,
            padding=self.padding,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors='''pt''',
        )
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch['''input_values'''].shape[-1])
        batch_size = batch['''input_values'''].shape[0]

        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch['''attention_mask'''].sum(-1)).to(
                torch.long
            )
            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch['''input_values'''].device
            )
            # these two operations make sure that all values
            # before the output lengths indices are attended to
            attention_mask[
                (torch.arange(attention_mask.shape[0], device=batch['''input_values'''].device), output_lengths - 1)
            ] = 1
            attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()

        # sample randomly masked indices
        batch["mask_time_indices"] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length),
            self.model.config.mask_time_prob,
            self.model.config.mask_time_length,
            attention_mask=attention_mask,
            min_masks=2,
        )
        return batch
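
    # Worked example (added for clarity) of the flip/cumsum/flip trick above:
    # with a padded length of 5 and a real output length of 4, the marker row is
    #     [0, 0, 0, 1, 0]          (1 at index output_length - 1)
    # flip   -> [0, 1, 0, 0, 0]
    # cumsum -> [0, 1, 1, 1, 1]
    # flip   -> [1, 1, 1, 1, 0]    -> as bool: the first 4 positions are attended to.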
class Wav2Vec2PreTrainer(Trainer):
    """Subclassed Trainer that decays a gumbel-softmax temperature over the course of pretraining."""

    def __init__(self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs):
        super().__init__(*args, **kwargs)
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay

    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs['''mask_time_indices''']).sum()
            else:
                raise ValueError(f"""{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']""")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )

        return loss.detach()
def main() -> None:
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args, training_args)

    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)

    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f'''{data_args.train_split_name}[:{data_args.validation_split_percentage}%]''',
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f'''{data_args.train_split_name}[{data_args.validation_split_percentage}%:]''',
            cache_dir=model_args.cache_dir,
        )
    else:
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split='''validation''',
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f'''{data_args.train_split_name}''',
            cache_dir=model_args.cache_dir,
        )

    # only normalized-inputs-training is supported
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=True
    )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        batch["speech"], _ = librosa.load(batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate)
        return batch

    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets['''train'''].column_names
    )

    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data: len(data['''speech''']) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate)
    )

    def normalize(batch):
        return feature_extractor(batch['''speech'''], sampling_rate=feature_extractor.sampling_rate)

    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        load_from_cache_file=not data_args.overwrite_cache,
        remove_columns=vectorized_datasets['''train'''].column_names,
    )

    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = Wav2Vec2Config.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        gradient_checkpointing=training_args.gradient_checkpointing,
    )

    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            '''PreTraining is only supported for ``config.do_stable_layer_norm=True`` and'''
            ''' ``config.feat_extract_norm='layer'``'''
        )

    model = Wav2Vec2ForPreTraining(config)

    data_collator = DataCollatorForWav2Vec2Pretraining(model=model, feature_extractor=feature_extractor)

    trainer = Wav2Vec2PreTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        train_dataset=vectorized_datasets['''train'''],
        eval_dataset=vectorized_datasets['''validation'''],
        tokenizer=feature_extractor,
        max_gumbel_temp=model_args.max_gumbel_temperature,
        min_gumbel_temp=model_args.min_gumbel_temperature,
        gumbel_temp_decay=model_args.gumbel_temperature_decay,
    )
    trainer.train()


if __name__ == "__main__":
    main()
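
# Illustrative launch command (added for context; the script filename, model
# and dataset names below are assumptions, not taken from this file):
#
#   python run_pretrain.py \
#     --model_name_or_path patrickvonplaten/wav2vec2-base-v2 \
#     --dataset_name librispeech_asr \
#     --dataset_config_name clean \
#     --output_dir ./wav2vec2-pretrained \
#     --do_train \
#     --max_duration_in_seconds 20.0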
| 99
| 1
|
'''simple docstring'''
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
    import tensorflow as tf

    from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class TFBenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["""bs"""], model_result["""ss"""]):
                result = model_result["""result"""][batch_size][sequence_length]
                self.assertIsNotNone(result)
def A__ ( self ) -> List[Any]:
'''simple docstring'''
lowercase__ = """sshleifer/tiny-gpt2"""
lowercase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase__ , inference=lowerCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=lowerCamelCase__ , multi_process=lowerCamelCase__ , )
lowercase__ = TensorFlowBenchmark(lowerCamelCase__ )
lowercase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def A__ ( self ) -> str:
'''simple docstring'''
lowercase__ = """sgugger/tiny-distilbert-classification"""
lowercase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase__ , inference=lowerCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCamelCase__ , only_pretrain_model=lowerCamelCase__ , )
lowercase__ = TensorFlowBenchmark(lowerCamelCase__ )
lowercase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def A__ ( self ) -> Tuple:
'''simple docstring'''
lowercase__ = """sshleifer/tiny-gpt2"""
lowercase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase__ , inference=lowerCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCamelCase__ , )
lowercase__ = TensorFlowBenchmark(lowerCamelCase__ )
lowercase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def A__ ( self ) -> str:
'''simple docstring'''
lowercase__ = """sshleifer/tiny-gpt2"""
lowercase__ = AutoConfig.from_pretrained(lowerCamelCase__ )
lowercase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase__ , inference=lowerCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=lowerCamelCase__ , multi_process=lowerCamelCase__ , )
lowercase__ = TensorFlowBenchmark(lowerCamelCase__ , [config] )
lowercase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def A__ ( self ) -> List[str]:
'''simple docstring'''
lowercase__ = """sshleifer/tiny-gpt2"""
lowercase__ = AutoConfig.from_pretrained(lowerCamelCase__ )
lowercase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase__ , inference=lowerCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCamelCase__ , )
lowercase__ = TensorFlowBenchmark(lowerCamelCase__ , [config] )
lowercase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def A__ ( self ) -> int:
'''simple docstring'''
lowercase__ = """sshleifer/tiny-gpt2"""
lowercase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase__ , inference=lowerCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCamelCase__ , )
lowercase__ = TensorFlowBenchmark(lowerCamelCase__ )
lowercase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def A__ ( self ) -> Tuple:
'''simple docstring'''
lowercase__ = """sshleifer/tiny-gpt2"""
lowercase__ = AutoConfig.from_pretrained(lowerCamelCase__ )
lowercase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase__ , inference=lowerCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCamelCase__ , )
lowercase__ = TensorFlowBenchmark(lowerCamelCase__ , [config] )
lowercase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def A__ ( self ) -> Any:
'''simple docstring'''
lowercase__ = """patrickvonplaten/t5-tiny-random"""
lowercase__ = AutoConfig.from_pretrained(lowerCamelCase__ )
lowercase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase__ , inference=lowerCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCamelCase__ , )
lowercase__ = TensorFlowBenchmark(lowerCamelCase__ , configs=[config] )
lowercase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , """Cannot do xla on CPU.""" )
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase__ = """sshleifer/tiny-gpt2"""
lowercase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase__ , inference=lowerCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , use_xla=lowerCamelCase__ , multi_process=lowerCamelCase__ , )
lowercase__ = TensorFlowBenchmark(lowerCamelCase__ )
lowercase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase__ = """sshleifer/tiny-gpt2"""
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=lowerCamelCase__ , save_to_csv=lowerCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(lowerCamelCase__ , """inf_time.csv""" ) , inference_memory_csv_file=os.path.join(lowerCamelCase__ , """inf_mem.csv""" ) , env_info_csv_file=os.path.join(lowerCamelCase__ , """env.csv""" ) , multi_process=lowerCamelCase__ , )
lowercase__ = TensorFlowBenchmark(lowerCamelCase__ )
benchmark.run()
self.assertTrue(Path(os.path.join(lowerCamelCase__ , """inf_time.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(lowerCamelCase__ , """inf_mem.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(lowerCamelCase__ , """env.csv""" ) ).exists() )
def A__ ( self ) -> Any:
'''simple docstring'''
lowercase__ = """sshleifer/tiny-gpt2"""
def _check_summary_is_not_empty(lowerCamelCase__ ):
self.assertTrue(hasattr(lowerCamelCase__ , """sequential""" ) )
self.assertTrue(hasattr(lowerCamelCase__ , """cumulative""" ) )
self.assertTrue(hasattr(lowerCamelCase__ , """current""" ) )
self.assertTrue(hasattr(lowerCamelCase__ , """total""" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=lowerCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(lowerCamelCase__ , """log.txt""" ) , log_print=lowerCamelCase__ , trace_memory_line_by_line=lowerCamelCase__ , eager_mode=lowerCamelCase__ , multi_process=lowerCamelCase__ , )
lowercase__ = TensorFlowBenchmark(lowerCamelCase__ )
lowercase__ = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(lowerCamelCase__ , """log.txt""" ) ).exists() )
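# A minimal sketch of the pattern these tests exercise, assuming the transformers
# benchmark API and the same tiny checkpoint used throughout:
#
#     from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
#
#     benchmark_args = TensorFlowBenchmarkArguments(
#         models=["sshleifer/tiny-gpt2"], training=False, inference=True,
#         sequence_lengths=[8], batch_sizes=[1], multi_process=False,
#     )
#     results = TensorFlowBenchmark(benchmark_args).run()
#     print(results.time_inference_result)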
| 164
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInstructPixaPixPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInstructPixaPixPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=8,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "image_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs
def A__ ( self ) -> str:
'''simple docstring'''
lowercase__ = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase__ = self.get_dummy_components()
lowercase__ = StableDiffusionInstructPixaPixPipeline(**lowerCamelCase__ )
lowercase__ = sd_pipe.to(lowerCamelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
lowercase__ = self.get_dummy_inputs(lowerCamelCase__ )
lowercase__ = sd_pipe(**lowerCamelCase__ ).images
lowercase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowercase__ = np.array([0.75_26, 0.37_50, 0.45_47, 0.61_17, 0.58_66, 0.50_16, 0.43_27, 0.56_42, 0.48_15] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase__ = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase__ = self.get_dummy_components()
lowercase__ = StableDiffusionInstructPixaPixPipeline(**lowerCamelCase__ )
lowercase__ = sd_pipe.to(lowerCamelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
lowercase__ = self.get_dummy_inputs(lowerCamelCase__ )
lowercase__ = """french fries"""
lowercase__ = sd_pipe(**lowerCamelCase__ , negative_prompt=lowerCamelCase__ )
lowercase__ = output.images
lowercase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowercase__ = np.array([0.75_11, 0.36_42, 0.45_53, 0.62_36, 0.57_97, 0.50_13, 0.43_43, 0.56_11, 0.48_31] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase__ = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase__ = self.get_dummy_components()
lowercase__ = StableDiffusionInstructPixaPixPipeline(**lowerCamelCase__ )
lowercase__ = sd_pipe.to(lowerCamelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
lowercase__ = self.get_dummy_inputs(lowerCamelCase__ )
lowercase__ = [inputs["""prompt"""]] * 2
lowercase__ = np.array(inputs["""image"""] ).astype(np.floataa ) / 2_55.0
lowercase__ = torch.from_numpy(lowerCamelCase__ ).unsqueeze(0 ).to(lowerCamelCase__ )
lowercase__ = image / 2 + 0.5
lowercase__ = image.permute(0 , 3 , 1 , 2 )
lowercase__ = image.repeat(2 , 1 , 1 , 1 )
lowercase__ = sd_pipe(**lowerCamelCase__ ).images
lowercase__ = image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
lowercase__ = np.array([0.58_12, 0.57_48, 0.52_22, 0.59_08, 0.56_95, 0.71_74, 0.68_04, 0.55_23, 0.55_79] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def A__ ( self ) -> Any:
'''simple docstring'''
lowercase__ = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase__ = self.get_dummy_components()
lowercase__ = EulerAncestralDiscreteScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" )
lowercase__ = StableDiffusionInstructPixaPixPipeline(**lowerCamelCase__ )
lowercase__ = sd_pipe.to(lowerCamelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
lowercase__ = self.get_dummy_inputs(lowerCamelCase__ )
lowercase__ = sd_pipe(**lowerCamelCase__ ).images
lowercase__ = image[0, -3:, -3:, -1]
        rounded_slice = [round(x, 4) for x in image_slice.flatten().tolist()]
        print(",".join([str(x) for x in rounded_slice]))
assert image.shape == (1, 32, 32, 3)
lowercase__ = np.array([0.74_17, 0.38_42, 0.47_32, 0.57_76, 0.58_91, 0.51_39, 0.40_52, 0.56_73, 0.49_86] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def A__ ( self ) -> Tuple:
'''simple docstring'''
lowercase__ = self.get_dummy_components()
lowercase__ = StableDiffusionInstructPixaPixPipeline(**lowerCamelCase__ )
lowercase__ = VaeImageProcessor(do_resize=lowerCamelCase__ , do_normalize=lowerCamelCase__ )
lowercase__ = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
lowercase__ = pipe(**self.get_dummy_inputs_by_type(lowerCamelCase__ , input_image_type="""pt""" ) )[0]
lowercase__ = components["""vae"""]
lowercase__ = self.get_dummy_inputs_by_type(lowerCamelCase__ , input_image_type="""pt""" )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
lowercase__ = vae.encode(inputs[image_param] ).latent_dist.mode()
lowercase__ = pipe(**lowerCamelCase__ )[0]
lowercase__ = np.abs(out - out_latents_inputs ).max()
self.assertLess(lowerCamelCase__ , 1e-4 , """passing latents as image input generate different result from passing image""" )
@slow
@require_torch_gpu
class StableDiffusionInstructPixaPixPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        image = load_image(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg"
        )
        inputs = {
            "prompt": "turn him into a cyborg",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "image_guidance_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs
def A__ ( self ) -> str:
'''simple docstring'''
lowercase__ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=lowerCamelCase__ )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
pipe.enable_attention_slicing()
lowercase__ = self.get_inputs()
lowercase__ = pipe(**lowerCamelCase__ ).images
lowercase__ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
lowercase__ = np.array([0.59_02, 0.60_15, 0.60_27, 0.59_83, 0.60_92, 0.60_61, 0.57_65, 0.57_85, 0.55_55] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def A__ ( self ) -> List[Any]:
'''simple docstring'''
lowercase__ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=lowerCamelCase__ )
lowercase__ = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
pipe.enable_attention_slicing()
lowercase__ = self.get_inputs()
lowercase__ = pipe(**lowerCamelCase__ ).images
lowercase__ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
lowercase__ = np.array([0.65_78, 0.68_17, 0.69_72, 0.67_61, 0.68_56, 0.69_16, 0.64_28, 0.65_16, 0.63_01] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def A__ ( self ) -> str:
'''simple docstring'''
lowercase__ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=lowerCamelCase__ )
lowercase__ = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
pipe.enable_attention_slicing()
lowercase__ = self.get_inputs()
lowercase__ = pipe(**lowerCamelCase__ ).images
lowercase__ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
lowercase__ = np.array([0.38_28, 0.38_34, 0.38_18, 0.37_92, 0.38_65, 0.37_52, 0.37_92, 0.38_47, 0.37_53] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def A__ ( self ) -> int:
'''simple docstring'''
lowercase__ = 0
def callback_fn(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> None:
lowercase__ = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
lowercase__ = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
lowercase__ = latents[0, -3:, -3:, -1]
lowercase__ = np.array([-0.24_63, -0.46_44, -0.97_56, 1.51_76, 1.44_14, 0.78_66, 0.98_97, 0.85_21, 0.79_83] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
lowercase__ = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
lowercase__ = latents[0, -3:, -3:, -1]
lowercase__ = np.array([-0.26_44, -0.46_26, -0.96_53, 1.51_76, 1.45_51, 0.76_86, 0.98_05, 0.84_52, 0.81_15] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
lowercase__ = False
lowercase__ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=lowerCamelCase__ , torch_dtype=torch.floataa )
lowercase__ = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
pipe.enable_attention_slicing()
lowercase__ = self.get_inputs()
pipe(**lowerCamelCase__ , callback=lowerCamelCase__ , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def A__ ( self ) -> Tuple:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowercase__ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=lowerCamelCase__ , torch_dtype=torch.floataa )
lowercase__ = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowercase__ = self.get_inputs()
lowercase__ = pipe(**lowerCamelCase__ )
lowercase__ = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase__ = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
lowercase__ = inputs["""image"""].resize((504, 504) )
lowercase__ = """timbrooks/instruct-pix2pix"""
lowercase__ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
lowerCamelCase__ , safety_checker=lowerCamelCase__ , )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
pipe.enable_attention_slicing()
lowercase__ = pipe(**lowerCamelCase__ )
lowercase__ = output.images[0]
lowercase__ = image[255:258, 383:386, -1]
assert image.shape == (504, 504, 3)
lowercase__ = np.array([0.27_26, 0.25_29, 0.26_64, 0.26_55, 0.26_41, 0.26_42, 0.25_91, 0.26_49, 0.25_90] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
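# Minimal end-to-end usage sketch of the pipeline tested above (note: in released
# diffusers the class is spelled StableDiffusionInstructPix2PixPipeline; `image` is a
# placeholder for any RGB PIL image):
#
#     pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
#         "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
#     ).to("cuda")
#     edited = pipe(
#         "turn him into a cyborg", image=image,
#         num_inference_steps=10, image_guidance_scale=1.0,
#     ).images[0]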
| 164
| 1
|
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
'E': 12.70,
'T': 9.06,
'A': 8.17,
'O': 7.51,
'I': 6.97,
'N': 6.75,
'S': 6.33,
'H': 6.09,
'R': 5.99,
'D': 4.25,
'L': 4.03,
'C': 2.78,
'U': 2.76,
'M': 2.41,
'W': 2.36,
'F': 2.23,
'G': 2.02,
'Y': 1.97,
'P': 1.93,
'B': 1.29,
'V': 0.98,
'K': 0.77,
'J': 0.15,
'X': 0.15,
'Q': 0.10,
'Z': 0.07,
}
ETAOIN = "ETAOINSHRDLCUMWFGYPBVKJXQZ"
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def get_letter_count(message: str) -> dict:
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count


def get_item_at_index_zero(x: tuple) -> str:
    return x[0]


def get_frequency_order(message: str) -> str:
    letter_to_freq = get_letter_count(message)
    freq_to_letter: dict[int, list[str]] = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)
    freq_to_letter_str: dict[int, str] = {}
    for freq in freq_to_letter:
        # within one frequency bucket, break ties by reversed typical-English order
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])
    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)
    freq_order: list[str] = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order)


def english_freq_match_score(message: str) -> int:
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score
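# Usage sketch (functions above): the score ranges from 0 to 12 and is higher when the
# message's letter frequencies resemble typical English.
#
#     ciphertext = "SUMMER IS COMING"
#     print(get_frequency_order(ciphertext))
#     print(english_freq_match_score(ciphertext))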
if __name__ == "__main__":
import doctest
doctest.testmod()
| 90
|
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'allenai/led-base-16384': 16384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    # Build a reversible mapping from all 256 byte values to printable unicode characters.
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    # Return the set of adjacent symbol pairs in a word (given as a tuple of symbols).
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
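# Examples: bytes_to_unicode builds a reversible 256-entry byte <-> printable-character
# table (assert len(bytes_to_unicode()) == 256), and get_pairs returns adjacent symbol
# pairs, e.g. get_pairs(("h", "e", "l", "l", "o"))
# == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}.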
class LEDTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            # merge the lowest-ranked (most frequent) pair first
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
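    # Toy walkthrough of the merge loop above (hypothetical ranks, not the real LED
    # merges): with self.bpe_ranks = {("l", "o"): 0, ("lo", "w"): 1}, bpe("low") first
    # merges ("l", "o") -> "lo", then ("lo", "w") -> "low", caches and returns "low".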
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ):
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)
            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))
        return encoded_inputs
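# Padding behaviour sketch (assumes the tokenizer above): when an encoded input carries a
# `global_attention_mask`, _pad extends it with -1 on the padding side, because 0 already
# means "local attention" rather than "do not attend". For example, right-padding
#     {"input_ids": [0, 9, 2], "global_attention_mask": [1, 0, 0]}
# to length 5 yields global_attention_mask == [1, 0, 0, -1, -1].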
| 90
| 1
|
from __future__ import annotations


class Node:
    """A simple binary tree node holding integer data."""

    def __init__(self, data):
        self.data = data
        self.left: Node | None = None
        self.right: Node | None = None


def display(tree):  # In Order traversal of the tree
    if tree:
        display(tree.left)
        print(tree.data)
        display(tree.right)


def depth_of_tree(tree) -> int:
    return 1 + max(depth_of_tree(tree.left), depth_of_tree(tree.right)) if tree else 0


def is_full_binary_tree(tree) -> bool:
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left) and is_full_binary_tree(tree.right)
    else:
        return not tree.left and not tree.right


def main() -> None:  # Main function for testing.
    # Example tree; the exact wiring below is a reconstruction, any binary tree works.
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    tree.left.right.left = Node(6)
    tree.right.left = Node(7)
    tree.right.left.left = Node(8)
    tree.right.left.left.right = Node(9)
    print(is_full_binary_tree(tree))
    print(depth_of_tree(tree))
    print("Tree is: ")
    display(tree)


if __name__ == "__main__":
    main()
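# Quick property check: a node with exactly one child makes the tree non-full.
#     lone = Node(1)
#     lone.left = Node(2)
#     assert is_full_binary_tree(lone) is False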
| 280
|
def is_palindrome(n) -> bool:
    return str(n) == str(n)[::-1]


def sum_reverse(n) -> int:
    return int(n) + int(str(n)[::-1])


def solution(limit: int = 10000) -> int:
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a = num
        while iterations < 50:
            num = sum_reverse(num)
            iterations += 1
            if is_palindrome(num):
                break
        else:
            lychrel_nums.append(a)
    return len(lychrel_nums)
if __name__ == "__main__":
print(F"""{solution() = }""")
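# Worked example: 47 is ruled out after a single reverse-and-add step, since
# 47 + 74 = 121 is a palindrome; 196 (the classic Lychrel candidate) never reaches a
# palindrome within the 50-iteration budget, so solution() counts it.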
| 280
| 1
|
"""simple docstring"""
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")

MAPPING_SPEECH_ENCODER_PRENET = {
"speech_encoder_prenet.layer_norm": "speecht5.encoder.prenet.feature_projection.layer_norm",
"speech_encoder_prenet.post_extract_proj": "speecht5.encoder.prenet.feature_projection.projection",
"speech_encoder_prenet.pos_conv.0": "speecht5.encoder.prenet.pos_conv_embed.conv",
"speech_encoder_prenet.mask_emb": "speecht5.encoder.prenet.masked_spec_embed",
}
MAPPING_TEXT_ENCODER_PRENET = {
"text_encoder_prenet.encoder_prenet.0": "speecht5.encoder.prenet.embed_tokens",
"text_encoder_prenet.encoder_prenet.1.alpha": "speecht5.encoder.prenet.encode_positions.alpha",
}
MAPPING_SPEECH_DECODER_PRENET = {
"speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0": "speecht5.decoder.prenet.layers.0",
"speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0": "speecht5.decoder.prenet.layers.1",
"speech_decoder_prenet.decoder_prenet.0.1": "speecht5.decoder.prenet.final_layer",
"speech_decoder_prenet.decoder_prenet.1.alpha": "speecht5.decoder.prenet.encode_positions.alpha",
"speech_decoder_prenet.spkembs_layer.0": "speecht5.decoder.prenet.speaker_embeds_layer",
}
MAPPING_SPEECH_DECODER_POSTNET = {
"speech_decoder_postnet.feat_out": "speech_decoder_postnet.feat_out",
"speech_decoder_postnet.prob_out": "speech_decoder_postnet.prob_out",
"speech_decoder_postnet.postnet.postnet.0.0": "speech_decoder_postnet.layers.0.conv",
"speech_decoder_postnet.postnet.postnet.0.1": "speech_decoder_postnet.layers.0.batch_norm",
"speech_decoder_postnet.postnet.postnet.1.0": "speech_decoder_postnet.layers.1.conv",
"speech_decoder_postnet.postnet.postnet.1.1": "speech_decoder_postnet.layers.1.batch_norm",
"speech_decoder_postnet.postnet.postnet.2.0": "speech_decoder_postnet.layers.2.conv",
"speech_decoder_postnet.postnet.postnet.2.1": "speech_decoder_postnet.layers.2.batch_norm",
"speech_decoder_postnet.postnet.postnet.3.0": "speech_decoder_postnet.layers.3.conv",
"speech_decoder_postnet.postnet.postnet.3.1": "speech_decoder_postnet.layers.3.batch_norm",
"speech_decoder_postnet.postnet.postnet.4.0": "speech_decoder_postnet.layers.4.conv",
"speech_decoder_postnet.postnet.postnet.4.1": "speech_decoder_postnet.layers.4.batch_norm",
}
MAPPING_TEXT_DECODER_PRENET = {
"text_decoder_prenet.embed_tokens": "speecht5.decoder.prenet.embed_tokens",
}
MAPPING_TEXT_DECODER_POSTNET = {
"text_decoder_postnet.output_projection": "text_decoder_postnet.lm_head",
}
MAPPING_ENCODER = {
"encoder.layers.*.self_attn.k_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj",
"encoder.layers.*.self_attn.v_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj",
"encoder.layers.*.self_attn.q_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj",
"encoder.layers.*.self_attn.out_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj",
"encoder.layers.*.self_attn_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.layer_norm",
"encoder.layers.*.fc1": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense",
"encoder.layers.*.fc2": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense",
"encoder.layers.*.final_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "speecht5.encoder.wrapped_encoder.layer_norm",
"encoder.pos_emb.pe_k": "speecht5.encoder.wrapped_encoder.embed_positions.pe_k",
}
MAPPING_DECODER = {
"decoder.layers.*.self_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj",
"decoder.layers.*.self_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj",
"decoder.layers.*.self_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj",
"decoder.layers.*.self_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj",
"decoder.layers.*.self_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm",
"decoder.layers.*.encoder_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj",
"decoder.layers.*.encoder_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj",
"decoder.layers.*.encoder_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj",
"decoder.layers.*.encoder_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj",
"decoder.layers.*.encoder_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm",
"decoder.layers.*.fc1": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense",
"decoder.layers.*.fc2": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense",
"decoder.layers.*.final_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm",
}
MAPPING_S2T = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
"encoder.version",
"encoder.layers.*.norm_k.weight",
"encoder.layers.*.norm_k.bias",
"decoder.version",
"decoder.layers.*.norm_k.weight",
"decoder.layers.*.norm_k.bias",
"decoder.pos_emb.pe_k",
"speech_encoder_prenet.embed_positions._float_tensor",
"text_decoder_prenet.embed_positions._float_tensor",
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"speech_decoder_prenet.*",
"speech_decoder_postnet.*",
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
"encoder.proj",
"speech_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    # Walk the attribute path on the HF model, then copy the fairseq tensor in place.
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f" {value.shape} for {full_name}"
        )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value
    logger.info(f'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.')
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
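# Example (using should_ignore above):
#     should_ignore("encoder.proj.weight", ["encoder.proj"])                           # True: plain substring
#     should_ignore("text_decoder_prenet.embed", ["text_decoder_prenet.*"])           # True: ".*" prefix pattern
#     should_ignore("decoder.layers.3.norm_k.bias", ["decoder.layers.*.norm_k.bias"])  # True: ".*." infix pattern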
def recursively_load_weights(fairseq_dict, hf_model, task):
    unused_weights = []
    if task == "s2t":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(f"Unsupported task: {task}")
    for name, value in fairseq_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_encoder,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix, suffix = key.split(".*.")
                    if prefix in name and suffix in name:
                        key = suffix
                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_speechta_checkpoint(
    task,
    checkpoint_path,
    pytorch_dump_folder_path,
    config_path=None,
    vocab_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = SpeechTaConfig.from_pretrained(config_path)
    else:
        config = SpeechTaConfig()
    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechTaForSpeechToText(config)
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechTaForTextToSpeech(config)
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechTaForSpeechToSpeech(config)
    else:
        raise ValueError(f"Unknown task name: {task}")
    if vocab_path:
        tokenizer = SpeechTaTokenizer(vocab_path, model_max_length=config.max_text_positions)
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])
    feature_extractor = SpeechTaFeatureExtractor()
    processor = SpeechTaProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
    processor.save_pretrained(pytorch_dump_folder_path)
    fairseq_checkpoint = torch.load(checkpoint_path)
    recursively_load_weights(fairseq_checkpoint["model"], model, task)
    model.save_pretrained(pytorch_dump_folder_path)
    if repo_id:
        print("Pushing to the hub...")
        processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--task",
default="s2t",
type=str,
help="Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.",
)
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
    args = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
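# Example invocation (script filename and paths are placeholders; the flags are the ones
# defined by the parser above):
#
#   python convert_speecht5_checkpoint.py \
#       --task s2t \
#       --checkpoint_path /path/to/fairseq_checkpoint.pt \
#       --vocab_path /path/to/spm_char.model \
#       --pytorch_dump_folder_path ./speecht5_s2t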
| 350
|
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table):
    # Doolittle LU decomposition without pivoting; `table` must be square.
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)
    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
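# Quick numerical check (uses lower_upper_decomposition above): the product of the two
# factors must reproduce the input matrix. The example matrix is chosen so that no zero
# pivot appears (the routine does not pivot).
#
#     a = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
#     lower, upper = lower_upper_decomposition(a)
#     assert np.allclose(lower @ upper, a)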
| 305
| 0
|
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table):
    # Doolittle LU decomposition without pivoting; `table` must be square.
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)
    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
| 189
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DDIMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "latents",
        "callback",
        "callback_steps",
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNetaDModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        scheduler = DDIMScheduler()
        components = {"unet": unet, "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
def snake_case ( self : Dict )-> str:
lowerCamelCase__ : Optional[Any] ='''cpu'''
lowerCamelCase__ : int =self.get_dummy_components()
lowerCamelCase__ : Optional[int] =self.pipeline_class(**lowerCamelCase )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
lowerCamelCase__ : List[str] =self.get_dummy_inputs(lowerCamelCase )
lowerCamelCase__ : Any =pipe(**lowerCamelCase ).images
lowerCamelCase__ : Optional[Any] =image[0, -3:, -3:, -1]
self.assertEqual(image.shape, (1, 32, 32, 3) )
lowerCamelCase__ : Tuple =np.array(
[1.000E00, 5.717E-01, 4.717E-01, 1.000E00, 0.000E00, 1.000E00, 3.000E-04, 0.000E00, 9.000E-04] )
lowerCamelCase__ : str =np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(lowerCamelCase, 1E-3 )
def snake_case ( self : Union[str, Any] )-> List[Any]:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def snake_case ( self : Union[str, Any] )-> int:
super().test_save_load_local(expected_max_difference=3E-3 )
def snake_case ( self : List[Any] )-> List[Any]:
super().test_save_load_optional_components(expected_max_difference=3E-3 )
def snake_case ( self : Optional[Any] )-> Optional[int]:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def snake_case ( self : Optional[Any] )-> List[str]:
lowerCamelCase__ : Optional[Any] ='''google/ddpm-cifar10-32'''
lowerCamelCase__ : Union[str, Any] =UNetaDModel.from_pretrained(lowerCamelCase )
lowerCamelCase__ : Optional[int] =DDIMScheduler()
lowerCamelCase__ : int =DDIMPipeline(unet=lowerCamelCase, scheduler=lowerCamelCase )
ddim.to(lowerCamelCase )
ddim.set_progress_bar_config(disable=lowerCamelCase )
lowerCamelCase__ : Tuple =torch.manual_seed(0 )
lowerCamelCase__ : int =ddim(generator=lowerCamelCase, eta=0.0, output_type='''numpy''' ).images
lowerCamelCase__ : Tuple =image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCamelCase__ : Any =np.array([0.1_723, 0.1_617, 0.1_600, 0.1_626, 0.1_497, 0.1_513, 0.1_505, 0.1_442, 0.1_453] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def snake_case ( self : Optional[int] )-> Any:
lowerCamelCase__ : str ='''google/ddpm-ema-bedroom-256'''
lowerCamelCase__ : Optional[int] =UNetaDModel.from_pretrained(lowerCamelCase )
lowerCamelCase__ : Any =DDIMScheduler.from_pretrained(lowerCamelCase )
lowerCamelCase__ : Optional[Any] =DDIMPipeline(unet=lowerCamelCase, scheduler=lowerCamelCase )
ddpm.to(lowerCamelCase )
ddpm.set_progress_bar_config(disable=lowerCamelCase )
lowerCamelCase__ : List[str] =torch.manual_seed(0 )
lowerCamelCase__ : Optional[Any] =ddpm(generator=lowerCamelCase, output_type='''numpy''' ).images
lowerCamelCase__ : Any =image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
lowerCamelCase__ : Any =np.array([0.0_060, 0.0_201, 0.0_344, 0.0_024, 0.0_018, 0.0_002, 0.0_022, 0.0_000, 0.0_069] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
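# Minimal usage sketch of the pipeline under test (mirrors the first slow test above;
# note that in released diffusers the classes are spelled DDIMPipeline / DDIMScheduler /
# UNet2DModel):
#
#     unet = UNetaDModel.from_pretrained("google/ddpm-cifar10-32")
#     ddim = DDIMPipeline(unet=unet, scheduler=DDIMScheduler()).to("cuda")
#     image = ddim(generator=torch.manual_seed(0), eta=0.0, output_type="numpy").images[0]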
| 238
| 0
|
'''simple docstring'''
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
lowercase__ = logging.get_logger(__name__)
def normalize_box(box, width, height):
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]


def apply_tesseract(image, lang, tesseract_config=None):
    """Applies Tesseract OCR on a document image and returns recognized words with normalized bounding boxes."""
    tesseract_config = tesseract_config if tesseract_config is not None else ""
    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]
    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]
    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)
    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))
    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"
    return words, normalized_boxes
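# Example: normalize_box rescales pixel boxes into the 0-1000 coordinate range that
# LayoutLM-style models expect. For a 200x100 image:
#     normalize_box([20, 10, 40, 30], 200, 100) == [100, 100, 200, 300]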
class LayoutLMvaImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def preprocess( self , images : ImageInput , do_resize : bool = None , size : Dict[str, int] = None , resample : PILImageResampling = None , apply_ocr : bool = None , ocr_lang : Optional[str] = None , tesseract_config : Optional[str] = None , return_tensors : Optional[Union[str, TensorType]] = None , data_format : ChannelDimension = ChannelDimension.FIRST , **kwargs , ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size )
        resample = resample if resample is not None else self.resample
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if apply_ocr:
            requires_backends(self , 'pytesseract' )
            words_batch = []
            boxes_batch = []
            for image in images:
                words , boxes = apply_tesseract(image , ocr_lang , tesseract_config )
                words_batch.append(words )
                boxes_batch.append(boxes )
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        # flip color channels from RGB to BGR (as Detectron2 requires this)
        images = [flip_channel_order(image ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = BatchFeature(data={'pixel_values': images} , tensor_type=return_tensors )
        if apply_ocr:
            data['words'] = words_batch
            data['boxes'] = boxes_batch
        return data
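# Hedged usage sketch (not part of the original file; the class keeps the name `A_` in this
# copy, and pytesseract plus a PIL image are assumed available):
#   processor = A_(apply_ocr=True)
#   encoding = processor(images=Image.open('document.png').convert('RGB'), return_tensors='np')
#   # encoding['pixel_values'] is (1, 3, 224, 224) in BGR channel order; with OCR enabled,
#   # encoding also carries per-image 'words' and normalized 'boxes'.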
| 280
|
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json",
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class PerceiverConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = """perceiver"""
    def __init__( self , num_latents=256 , d_latents=1_280 , d_model=768 , num_blocks=1 , num_self_attends_per_block=26 , num_self_attention_heads=8 , num_cross_attention_heads=8 , qk_channels=None , v_channels=None , cross_attention_shape_for_attention="kv" , self_attention_widening_factor=1 , cross_attention_widening_factor=1 , hidden_act="gelu" , attention_probs_dropout_prob=0.1 , initializer_range=0.02 , layer_norm_eps=1E-12 , use_query_residual=True , vocab_size=262 , max_position_embeddings=2_048 , image_size=56 , train_size=[368, 496] , num_frames=16 , audio_samples_per_frame=1_920 , samples_per_patch=16 , output_shape=[1, 16, 224, 224] , **kwargs , ) -> List[str]:
        super().__init__(**kwargs )
        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
class PerceiverOnnxConfig( OnnxConfig ):
    '''simple docstring'''
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('inputs', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ] )
    @property
    def atol_for_validation( self ) -> float:
        return 1E-4
    def generate_dummy_inputs( self , preprocessor : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , batch_size : int = -1 , seq_length : int = -1 , num_choices : int = -1 , is_pair : bool = False , framework : Optional[TensorType] = None , num_channels : int = 3 , image_width : int = 40 , image_height : int = 40 , ) -> Mapping[str, Any]:
        # copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
        if isinstance(preprocessor , PreTrainedTokenizerBase ):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair )
            seq_length = compute_effective_axis_dimension(
                seq_length , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=token_to_add )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [' '.join(['a'] ) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input , return_tensors=framework ) )
            inputs['inputs'] = inputs.pop('input_ids' )
            return inputs
        elif isinstance(preprocessor , FeatureExtractionMixin ) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size , fixed_dimension=OnnxConfig.default_fixed_batch )
            dummy_input = self._generate_dummy_images(batch_size , num_channels , image_height , image_width )
            inputs = dict(preprocessor(images=dummy_input , return_tensors=framework ) )
            inputs['inputs'] = inputs.pop('pixel_values' )
            return inputs
        else:
            raise ValueError(
                'Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.' )
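# For orientation (a sketch of the helper's contract as used above, not a verbatim copy):
# compute_effective_axis_dimension collapses a dynamic axis (-1) to a fixed size and then
# subtracts the tokens the tokenizer will add, e.g.
#   compute_effective_axis_dimension(-1, fixed_dimension=2, num_token_to_add=0)  # -> 2
#   compute_effective_axis_dimension(-1, fixed_dimension=8, num_token_to_add=2)  # -> 6
# so the dummy batch/sequence shapes stay small and ONNX-export friendly.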
| 280
| 1
|
import functools
def A_ ( days , costs ) -> int:
    # Validation
    if not isinstance(days , list ) or not all(isinstance(day , int ) for day in days ):
        raise ValueError('The parameter days should be a list of integers' )
    if len(costs ) != 3 or not all(isinstance(cost , int ) for cost in costs ):
        raise ValueError('The parameter costs should be a list of three integers' )
    if len(days ) == 0:
        return 0
    if min(days ) <= 0:
        raise ValueError('All days elements should be greater than 0' )
    if max(days ) >= 366:
        raise ValueError('All days elements should be less than 366' )
    days_set = set(days )
    @functools.cache
    def dynamic_programming(index ) -> int:
if index > 365:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )
return dynamic_programming(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
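    # Hand-checkable example (the classic "minimum cost for tickets" instance, not from the
    # source): travel days [1, 4, 6, 7, 8, 20] with pass costs [2, 7, 15] cost at least 11
    # (a 1-day pass on day 1, a 7-day pass covering days 4-8, and a 1-day pass on day 20).
    assert A_([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11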
| 99
|
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class A__ ( unittest.TestCase ):
"""simple docstring"""
    def check_results_dict_not_empty( self , results ) -> None:
        '''simple docstring'''
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss']):
                result = model_result['result'][batch_size][sequence_length]
                self.assertIsNotNone(result)
    def test_inference_no_configs_eager( self ) -> Optional[int]:
        '''simple docstring'''
        MODEL_ID = 'sshleifer/tiny-gpt2'
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=True , multi_process=False , )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_inference_no_configs_only_pretrain( self ) -> Tuple:
        '''simple docstring'''
        MODEL_ID = 'sgugger/tiny-distilbert-classification'
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , only_pretrain_model=True , )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_inference_no_configs_graph( self ) -> List[str]:
        '''simple docstring'''
        MODEL_ID = 'sshleifer/tiny-gpt2'
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_inference_with_configs_eager( self ) -> Optional[int]:
        '''simple docstring'''
        MODEL_ID = 'sshleifer/tiny-gpt2'
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=True , multi_process=False , )
        benchmark = TensorFlowBenchmark(benchmark_args , [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_inference_with_configs_graph( self ) -> str:
        '''simple docstring'''
        MODEL_ID = 'sshleifer/tiny-gpt2'
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = TensorFlowBenchmark(benchmark_args , [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_train_no_configs( self ) -> Dict:
        '''simple docstring'''
        MODEL_ID = 'sshleifer/tiny-gpt2'
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=True , inference=False , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)
    def test_train_with_configs( self ) -> int:
        '''simple docstring'''
        MODEL_ID = 'sshleifer/tiny-gpt2'
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=True , inference=False , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = TensorFlowBenchmark(benchmark_args , [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)
    def test_inference_encoder_decoder_with_configs( self ) -> int:
        '''simple docstring'''
        MODEL_ID = 'patrickvonplaten/t5-tiny-random'
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = TensorFlowBenchmark(benchmark_args , configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('GPU')) == 0 , 'Cannot do xla on CPU.')
    def test_inference_no_configs_xla( self ) -> List[Any]:
        '''simple docstring'''
        MODEL_ID = 'sshleifer/tiny-gpt2'
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , use_xla=True , multi_process=False , )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_save_csv_files( self ) -> Dict:
        '''simple docstring'''
        MODEL_ID = 'sshleifer/tiny-gpt2'
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID] , inference=True , save_to_csv=True , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(tmp_dir , 'inf_time.csv') , inference_memory_csv_file=os.path.join(tmp_dir , 'inf_mem.csv') , env_info_csv_file=os.path.join(tmp_dir , 'env.csv') , multi_process=False , )
            benchmark = TensorFlowBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir , 'inf_time.csv')).exists())
            self.assertTrue(Path(os.path.join(tmp_dir , 'inf_mem.csv')).exists())
            self.assertTrue(Path(os.path.join(tmp_dir , 'env.csv')).exists())
    def test_trace_memory( self ) -> str:
        '''simple docstring'''
        MODEL_ID = 'sshleifer/tiny-gpt2'
        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary , 'sequential'))
            self.assertTrue(hasattr(summary , 'cumulative'))
            self.assertTrue(hasattr(summary , 'current'))
            self.assertTrue(hasattr(summary , 'total'))
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID] , inference=True , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(tmp_dir , 'log.txt') , log_print=True , trace_memory_line_by_line=True , eager_mode=True , multi_process=False , )
            benchmark = TensorFlowBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            self.assertTrue(Path(os.path.join(tmp_dir , 'log.txt')).exists())
| 99
| 1
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class VivitImageProcessingTester( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , num_frames=10 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , crop_size=None , ) -> Tuple:
        '''simple docstring'''
        size = size if size is not None else {'''shortest_edge''': 18}
        crop_size = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.crop_size = crop_size
def SCREAMING_SNAKE_CASE_ ( self ) -> Dict:
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class __A( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = VivitImageProcessor if is_vision_available() else None
    def setUp( self ) -> int:
        '''simple docstring'''
        self.image_processor_tester = VivitImageProcessingTester(self )
    @property
    def image_processor_dict( self ) -> List[str]:
        '''simple docstring'''
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ) -> str:
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , '''image_mean''' ) )
        self.assertTrue(hasattr(image_processing , '''image_std''' ) )
        self.assertTrue(hasattr(image_processing , '''do_normalize''' ) )
        self.assertTrue(hasattr(image_processing , '''do_resize''' ) )
        self.assertTrue(hasattr(image_processing , '''do_center_crop''' ) )
        self.assertTrue(hasattr(image_processing , '''size''' ) )
    def test_image_processor_from_dict_with_kwargs( self ) -> int:
        '''simple docstring'''
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 18} )
        self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
        self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
    def test_call_pil( self ) -> Optional[Any]:
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL videos
        video_inputs = prepare_video_inputs(self.image_processor_tester , equal_resolution=False )
        for video in video_inputs:
            self.assertIsInstance(video , list )
            self.assertIsInstance(video[0] , Image.Image )
        # Test not batched input
        encoded_videos = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
        encoded_videos = image_processing(video_inputs , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
    def test_call_numpy( self ) -> Any:
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for video in video_inputs:
            self.assertIsInstance(video , list )
            self.assertIsInstance(video[0] , np.ndarray )
        # Test not batched input
        encoded_videos = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
        encoded_videos = image_processing(video_inputs , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
    def test_call_pytorch( self ) -> str:
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for video in video_inputs:
            self.assertIsInstance(video , list )
            self.assertIsInstance(video[0] , torch.Tensor )
        # Test not batched input
        encoded_videos = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
        encoded_videos = image_processing(video_inputs , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
| 33
|
Pointad = tuple[float, float, float]
Vectorad = tuple[float, float, float]
def create_vector ( end_pointa : Pointad , end_pointb : Pointad ) -> Vectorad:
    x = end_pointb[0] - end_pointa[0]
    y = end_pointb[1] - end_pointa[1]
    z = end_pointb[2] - end_pointa[2]
    return (x, y, z)
def get_ad_vectors_cross ( ab : Vectorad , ac : Vectorad ) -> Vectorad:
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)
def is_zero_vector ( vector : Vectorad , accuracy : int ) -> bool:
    return tuple(round(x , accuracy ) for x in vector ) == (0, 0, 0)
def are_collinear ( a : Pointad , b : Pointad , c : Pointad , accuracy : int = 10 ) -> bool:
    ab = create_vector(a , b )
    ac = create_vector(a , c )
    return is_zero_vector(get_ad_vectors_cross(ab , ac ) , accuracy )
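# Quick sanity check (example values assumed, not from the source): the points
# (0, 0, 0), (1, 1, 1) and (2, 2, 2) lie on one line, so the cross product of the two
# spanning vectors rounds to the zero vector:
#   are_collinear((0, 0, 0), (1, 1, 1), (2, 2, 2))  # -> True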
| 33
| 1
|
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge :
    """simple docstring"""
    def __init__( self , short_edge_length , max_size=sys.maxsize ) -> List[str]:
        '''simple docstring'''
        self.interp_method = 'bilinear'
        self.max_size = max_size
        self.short_edge_length = short_edge_length
    def __call__( self , imgs ) -> Optional[int]:
        '''simple docstring'''
        img_augs = []
        for img in imgs:
            h , w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 )
            if size == 0:
                return img
            scale = size * 1.0 / min(h , w )
            if h < w:
                newh , neww = size, scale * w
            else:
                newh , neww = scale * h, size
            if max(newh , neww ) > self.max_size:
                scale = self.max_size * 1.0 / max(newh , neww )
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5 )
            newh = int(newh + 0.5 )
            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img )
                pil_image = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR )
                img = np.asarray(pil_image )
            else:
                img = img.permute(2 , 0 , 1 ).unsqueeze(0 )  # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img , (newh, neww) , mode=self.interp_method , align_corners=False ).squeeze(0 )
            img_augs.append(img )
        return img_augs
class __lowerCAmelCase :
"""simple docstring"""
    def __init__( self , cfg ) -> Optional[int]:
        '''simple docstring'''
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST )
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
        self.normalizer = lambda x : (x - self.pixel_mean) / self.pixel_std
    def pad( self , images ) -> Tuple:
        '''simple docstring'''
        max_size = tuple(max(s ) for s in zip(*[img.shape for img in images] ) )
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
            for size, im in zip(image_sizes , images )
        ]
        return torch.stack(images ), torch.tensor(image_sizes )
    def __call__( self , images , single_image=False ) -> List[Any]:
        '''simple docstring'''
        with torch.no_grad():
            if not isinstance(images , list ):
                images = [images]
            if single_image:
                assert len(images ) == 1
            for i in range(len(images ) ):
                if isinstance(images[i] , torch.Tensor ):
                    images.insert(i , images.pop(i ).to(self.device ).float() )
                elif not isinstance(images[i] , torch.Tensor ):
                    images.insert(
                        i , torch.as_tensor(img_tensorize(images.pop(i ) , input_format=self.input_format ) )
                        .to(self.device )
                        .float() , )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images] )
            images = self.aug(images )
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x ) for x in images]
            # now pad them to do the following operations
            images , sizes = self.pad(images )
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes , sizes )
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx
def _scale_box ( boxes , scale_yx ) -> List[Any]:
    """simple docstring"""
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes
def _clip_box ( tensor , box_size : Tuple[int, int] ) -> Dict:
    """simple docstring"""
    assert torch.isfinite(tensor ).all(), "Box tensor contains infinite or NaN!"
    h , w = box_size
    tensor[:, 0].clamp_(min=0 , max=w )
    tensor[:, 1].clamp_(min=0 , max=h )
    tensor[:, 2].clamp_(min=0 , max=w )
    tensor[:, 3].clamp_(min=0 , max=h )
| 90
|
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10
def lin_search ( left : int , right : int , array : list[int] , target : int ) -> int:
    """simple docstring"""
    for i in range(left , right ):
        if array[i] == target:
            return i
    return -1
def ite_ternary_search ( array : list[int] , target : int ) -> int:
    """simple docstring"""
    left = 0
    right = len(array )
    while left <= right:
        if right - left < precision:
            return lin_search(left , right , array , target )
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1
def rec_ternary_search ( left : int , right : int , array : list[int] , target : int ) -> int:
    """simple docstring"""
    if left < right:
        if right - left < precision:
            return lin_search(left , right , array , target )
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left , one_third - 1 , array , target )
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1 , right , array , target )
        else:
            return rec_ternary_search(one_third + 1 , two_third - 1 , array , target )
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result_ite = ite_ternary_search(collection, target)
    result_rec = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result_ite != -1:
        print(f'''Iterative search: {target} found at positions: {result_ite}''')
        print(f'''Recursive search: {target} found at positions: {result_rec}''')
else:
print("Not found")
| 90
| 1
|
from math import factorial
def solution ( num : int = 100 ) -> int:
    '''simple docstring'''
    return sum(int(x ) for x in str(factorial(num ) ) )
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
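    # Worked example (hand-checkable, not from the source): 10! = 3628800 and
    # 3+6+2+8+8+0+0 = 27, so:
    assert solution(10) == 27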
| 364
|
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
snake_case : Union[str, Any] = logging.get_logger(__name__)
class HyperParamSearchBackendBase :
    name: str
    pip_package: str = None
    @staticmethod
    def is_available( ):
        raise NotImplementedError
    def run( self , trainer , n_trials , direction , **kwargs ):
        raise NotImplementedError
    def default_hp_space( self , trial ):
        raise NotImplementedError
    def ensure_available( self ):
        if not self.is_available():
            raise RuntimeError(
                f'''You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.''' )
    @classmethod
    def pip_install( cls ):
        return f'''`pip install {cls.pip_package or cls.name}`'''
class OptunaBackend ( HyperParamSearchBackendBase ):
    name = 'optuna'
    @staticmethod
    def is_available( ):
        return is_optuna_available()
    def run( self , trainer , n_trials , direction , **kwargs ):
        return run_hp_search_optuna(trainer , n_trials , direction , **kwargs )
    def default_hp_space( self , trial ):
        return default_hp_space_optuna(trial )
class RayTuneBackend ( HyperParamSearchBackendBase ):
    name = 'ray'
    pip_package = '\'ray[tune]\''
    @staticmethod
    def is_available( ):
        return is_ray_available()
    def run( self , trainer , n_trials , direction , **kwargs ):
        return run_hp_search_ray(trainer , n_trials , direction , **kwargs )
    def default_hp_space( self , trial ):
        return default_hp_space_ray(trial )
class SigOptBackend ( HyperParamSearchBackendBase ):
    name = 'sigopt'
    @staticmethod
    def is_available( ):
        return is_sigopt_available()
    def run( self , trainer , n_trials , direction , **kwargs ):
        return run_hp_search_sigopt(trainer , n_trials , direction , **kwargs )
    def default_hp_space( self , trial ):
        return default_hp_space_sigopt(trial )
class WandbBackend ( HyperParamSearchBackendBase ):
    name = 'wandb'
    @staticmethod
    def is_available( ):
        return is_wandb_available()
    def run( self , trainer , n_trials , direction , **kwargs ):
        return run_hp_search_wandb(trainer , n_trials , direction , **kwargs )
    def default_hp_space( self , trial ):
        return default_hp_space_wandb(trial )
snake_case : int = {
HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def default_hp_search_backend ( ) -> str:
    '''simple docstring'''
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends ) > 0:
        name = available_backends[0].name
        if len(available_backends ) > 1:
            logger.info(
                F'''{len(available_backends )} hyperparameter search backends available. Using {name} as the default.''' )
        return name
raise RuntimeError(
"No hyperparameter search backend available.\n"
+ "\n".join(
F''' - To install {backend.name} run {backend.pip_install()}'''
for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
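# Typical call path (a hedged sketch, not part of this module): `Trainer.hyperparameter_search`
# dispatches to one of the backends above, e.g.
#   best_run = trainer.hyperparameter_search(backend='optuna', n_trials=20, direction='minimize')
# where `trainer` is a transformers.Trainer constructed with a `model_init` callable so each
# trial can re-instantiate the model.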
| 41
| 0
|
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
a__ : List[str] = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class lowercase_ ( TrainingArguments ):
    sortish_sampler: bool = field(default=False , metadata={'help': 'Whether to use SortishSampler or not.'} )
    predict_with_generate: bool = field(
        default=False , metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'} )
    generation_max_length: Optional[int] = field(
        default=None , metadata={
            'help': (
                'The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '
                'to the `max_length` value of the model configuration.'
            )
        } , )
    generation_num_beams: Optional[int] = field(
        default=None , metadata={
            'help': (
                'The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '
                'to the `num_beams` value of the model configuration.'
            )
        } , )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None , metadata={
            'help': 'Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'
        } , )
    def to_dict( self ):
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v , GenerationConfig ):
                d[k] = v.to_dict()
        return d
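# Hedged usage sketch (the class keeps the obfuscated name `lowercase_` in this copy):
#   args = lowercase_(output_dir='out', predict_with_generate=True, generation_num_beams=4)
#   args.to_dict()  # the override above serializes a nested GenerationConfig into a plain
#                   # dict, keeping the result JSON-friendly for logging.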
| 80
|
def harmonic_series ( n_term : str ) -> list:
    """simple docstring"""
    if n_term == "":
        return []
    series = []
    for temp in range(int(n_term ) ):
        series.append(f'''1/{temp + 1}''' if series else '''1''' )
    return series
if __name__ == "__main__":
    nth_term = input('Enter the last number (nth term) of the Harmonic Series')
    print('Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n')
    print(harmonic_series(nth_term))
| 305
| 0
|
'''simple docstring'''
def greatest_common_divisor ( x , y ) -> int:
    return x if y == 0 else greatest_common_divisor(y , x % y )
def lcm ( x , y ) -> int:
    return (x * y) // greatest_common_divisor(x , y )
def solution ( n = 20 ) -> int:
    g = 1
    for i in range(1 , n + 1 ):
        g = lcm(g , i )
    return g
if __name__ == "__main__":
print(F'''{solution() = }''')
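    # Hand-checkable case: the smallest number evenly divisible by all of 1..10 is 2520,
    # the worked example from the Project Euler 5 problem statement.
    assert solution(10) == 2520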
| 371
|
'''simple docstring'''
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm ( number : int ) -> int:
    if number < 0:
        raise ValueError("the value of input must not be negative" )
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result
def get_set_bits_count_using_modulo_operator ( number : int ) -> int:
    if number < 0:
        raise ValueError("the value of input must not be negative" )
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result
def benchmark ( ) -> None:
    def do_benchmark(number : int ) -> None:
        setup = "import __main__ as z"
        print(F'''Benchmark when {number = }:''' )
        print(F'''{get_set_bits_count_using_modulo_operator(number ) = }''' )
        timing = timeit(F"z.get_set_bits_count_using_modulo_operator({number})" , setup=setup )
        print(F'''timeit() runs in {timing} seconds''' )
        print(F'''{get_set_bits_count_using_brian_kernighans_algorithm(number ) = }''' )
        timing = timeit(
            F"z.get_set_bits_count_using_brian_kernighans_algorithm({number})" , setup=setup , )
        print(F'''timeit() runs in {timing} seconds''' )
    for number in (25, 37, 58, 0):
        do_benchmark(number )
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
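    # Spot check (hand-checkable): 25 is 0b11001, so both counters must agree on 3 set bits.
    assert get_set_bits_count_using_modulo_operator(25) == 3
    assert get_set_bits_count_using_brian_kernighans_algorithm(25) == 3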
| 199
| 0
|
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def get_config ( checkpoint_url ) -> Tuple:
    config = SwinaSRConfig()
    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        config.upscale = 4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        config.upscale = 4
        config.image_size = 48
        config.upsampler = 'pixelshuffle_aux'
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        config.depths = [6, 6, 6, 6]
        config.embed_dim = 60
        config.num_heads = [6, 6, 6, 6]
        config.upsampler = 'pixelshuffledirect'
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        config.upscale = 4
        config.upsampler = 'nearest+conv'
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        config.num_channels = 1
        config.upscale = 1
        config.image_size = 1_26
        config.window_size = 7
        config.img_range = 255.0
        config.upsampler = ''
    return config
def rename_key ( name , config ) -> Dict:
    if "patch_embed.proj" in name and "layers" not in name:
        name = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
    if "patch_embed.norm" in name:
        name = name.replace('patch_embed.norm' , 'embeddings.patch_embeddings.layernorm' )
    if "layers" in name:
        name = name.replace('layers' , 'encoder.stages' )
    if "residual_group.blocks" in name:
        name = name.replace('residual_group.blocks' , 'layers' )
    if "attn.proj" in name:
        name = name.replace('attn.proj' , 'attention.output.dense' )
    if "attn" in name:
        name = name.replace('attn' , 'attention.self' )
    if "norm1" in name:
        name = name.replace('norm1' , 'layernorm_before' )
    if "norm2" in name:
        name = name.replace('norm2' , 'layernorm_after' )
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1' , 'intermediate.dense' )
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2' , 'output.dense' )
    if "q_bias" in name:
        name = name.replace('q_bias' , 'query.bias' )
    if "k_bias" in name:
        name = name.replace('k_bias' , 'key.bias' )
    if "v_bias" in name:
        name = name.replace('v_bias' , 'value.bias' )
    if "cpb_mlp" in name:
        name = name.replace('cpb_mlp' , 'continuous_position_bias_mlp' )
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj' , 'patch_embed.projection' )
    if name == "norm.weight":
        name = 'layernorm.weight'
    if name == "norm.bias":
        name = 'layernorm.bias'
    if "conv_first" in name:
        name = name.replace('conv_first' , 'first_convolution' )
    if (
        "upsample" in name
        or "conv_before_upsample" in name
        or "conv_bicubic" in name
        or "conv_up" in name
        or "conv_hr" in name
        or "conv_last" in name
        or "aux" in name
    ):
        # heads
        if "conv_last" in name:
            name = name.replace('conv_last' , 'final_convolution' )
        if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
            if "conv_before_upsample.0" in name:
                name = name.replace('conv_before_upsample.0' , 'conv_before_upsample' )
            if "upsample.0" in name:
                name = name.replace('upsample.0' , 'upsample.convolution_0' )
            if "upsample.2" in name:
                name = name.replace('upsample.2' , 'upsample.convolution_1' )
            name = 'upsample.' + name
        elif config.upsampler == "pixelshuffledirect":
            name = name.replace('upsample.0.weight' , 'upsample.conv.weight' )
            name = name.replace('upsample.0.bias' , 'upsample.conv.bias' )
        else:
            pass
    else:
        name = 'swin2sr.' + name
    return name
def convert_state_dict ( orig_state_dict , config ) -> int:
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "qkv" in key:
            key_split = key.split('.' )
            stage_num = int(key_split[1] )
            block_num = int(key_split[4] )
            dim = config.embed_dim
            # destination keys below follow the HF Swin2SR naming (reconstructed; the
            # original targets were lost in this copy)
            if "weight" in key:
                orig_state_dict[
                    f"""swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.weight"""
                ] = val[:dim, :]
                orig_state_dict[
                    f"""swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.weight"""
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"""swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.weight"""
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"""swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.bias"""
                ] = val[:dim]
                orig_state_dict[
                    f"""swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.bias"""
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"""swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.bias"""
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key , config )] = val
    return orig_state_dict
def convert_swinasr_checkpoint ( checkpoint_url , pytorch_dump_folder_path , push_to_hub ) -> Any:
    config = get_config(checkpoint_url )
    model = SwinaSRForImageSuperResolution(config )
    model.eval()
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='cpu' )
    new_state_dict = convert_state_dict(state_dict , config )
    missing_keys , unexpected_keys = model.load_state_dict(new_state_dict , strict=False )
    if len(missing_keys ) > 0:
        raise ValueError('Missing keys when converting: {}'.format(missing_keys ) )
    for key in unexpected_keys:
        if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
            raise ValueError(F"""Unexpected key {key} in state_dict""" )
    # verify values
    url = 'https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true'
    image = Image.open(requests.get(url , stream=True ).raw ).convert('RGB' )
    processor = SwinaSRImageProcessor()
    # pixel_values = processor(image, return_tensors="pt").pixel_values
    image_size = 1_26 if 'Jpeg' in checkpoint_url else 2_56
    transforms = Compose(
        [
            Resize((image_size, image_size) ),
            ToTensor(),
            Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
        ] )
    pixel_values = transforms(image ).unsqueeze(0 )
    if config.num_channels == 1:
        pixel_values = pixel_values[:, 0, :, :].unsqueeze(1 )
    outputs = model(pixel_values )
# assert values
if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
__A : List[Any] = torch.Size([1, 3, 5_12, 5_12] )
__A : Optional[int] = torch.tensor(
[[-0.7_087, -0.7_138, -0.6_721], [-0.8_340, -0.8_095, -0.7_298], [-0.9_149, -0.8_414, -0.7_940]] )
elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
__A : List[str] = torch.Size([1, 3, 10_24, 10_24] )
__A : Optional[int] = torch.tensor(
[[-0.7_775, -0.8_105, -0.8_933], [-0.7_764, -0.8_356, -0.9_225], [-0.7_976, -0.8_686, -0.9_579]] )
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
# TODO values didn't match exactly here
__A : Union[str, Any] = torch.Size([1, 3, 10_24, 10_24] )
__A : Optional[int] = torch.tensor(
[[-0.8_035, -0.7_504, -0.7_491], [-0.8_538, -0.8_124, -0.7_782], [-0.8_804, -0.8_651, -0.8_493]] )
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
__A : Optional[Any] = torch.Size([1, 3, 5_12, 5_12] )
__A : Dict = torch.tensor(
[[-0.7_669, -0.8_662, -0.8_767], [-0.8_810, -0.9_962, -0.9_820], [-0.9_340, -1.0_322, -1.1_149]] )
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
__A : Optional[int] = torch.Size([1, 3, 10_24, 10_24] )
__A : Any = torch.tensor(
[[-0.5_238, -0.5_557, -0.6_321], [-0.6_016, -0.5_903, -0.6_391], [-0.6_244, -0.6_334, -0.6_889]] )
assert (
outputs.reconstruction.shape == expected_shape
), F"""Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"""
    assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , expected_slice , atol=1e-3 )
print('Looks ok!' )
    url_to_name = {
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth': (
'swin2SR-classical-sr-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth': (
'swin2SR-classical-sr-x4-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth': (
'swin2SR-compressed-sr-x4-48'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth': (
'swin2SR-lightweight-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth': (
'swin2SR-realworld-sr-x4-64-bsrgan-psnr'
),
}
    model_name = url_to_name[checkpoint_url]
    if pytorch_dump_folder_path is not None:
        print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
        model.save_pretrained(pytorch_dump_folder_path )
        print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
        processor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
model.push_to_hub(F"""caidas/{model_name}""" )
processor.push_to_hub(F"""caidas/{model_name}""" )
if __name__ == "__main__":
UpperCAmelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth''',
type=str,
help='''URL of the original Swin2SR checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the converted model to the hub.''')
UpperCAmelCase : Optional[Any] = parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 280
|
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
    """simple docstring"""
    def __init__( self , components = None ):
        if components is None:
            components = []
        self.__components = list(components )
    def __len__( self ):
        return len(self.__components )
    def __str__( self ):
        return "(" + ",".join(map(str , self.__components ) ) + ")"
    def __add__( self , other ):
        size = len(self )
        if size == len(other ):
            result = [self.__components[i] + other.component(i ) for i in range(size )]
            return Vector(result )
        else:
            raise Exception('must have the same size' )
    def __sub__( self , other ):
        size = len(self )
        if size == len(other ):
            result = [self.__components[i] - other.component(i ) for i in range(size )]
            return Vector(result )
        else:  # error case
            raise Exception('must have the same size' )
    @overload
    def __mul__( self , other : float ) -> "Vector":
        ...
    @overload
    def __mul__( self , other : "Vector" ) -> float:
        ...
    def __mul__( self , other ):
        if isinstance(other , (float, int) ):
            ans = [c * other for c in self.__components]
            return Vector(ans )
        elif isinstance(other , Vector ) and len(self ) == len(other ):
            size = len(self )
            prods = [self.__components[i] * other.component(i ) for i in range(size )]
            return sum(prods )
        else:  # error case
            raise Exception('invalid operand!' )
    def copy( self ):
        return Vector(self.__components )
    def component( self , i ):
        if isinstance(i , int ) and -len(self.__components ) <= i < len(self.__components ):
            return self.__components[i]
        else:
            raise Exception('index out of range' )
    def change_component( self , pos , value ):
        assert -len(self.__components ) <= pos < len(self.__components )
        self.__components[pos] = value
    def euclidean_length( self ):
        if len(self.__components ) == 0:
            raise Exception('Vector is empty' )
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares ) )
    def angle( self , other , deg = False ):
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den ) )
        else:
            return math.acos(num / den )
def zero_vector ( dimension ) -> Vector:
    assert isinstance(dimension , int )
    return Vector([0] * dimension )
def unit_basis_vector ( dimension , pos ) -> Vector:
    assert isinstance(dimension , int ) and (isinstance(pos , int ))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans )
def axpy ( scalar , x , y ) -> Vector:
    assert (
        isinstance(x , Vector )
        and isinstance(y , Vector )
        and (isinstance(scalar , (int, float) ))
    )
    return x * scalar + y
def random_vector ( n , a , b ) -> Vector:
    random.seed(None )
    ans = [random.randint(a , b ) for _ in range(n )]
    return Vector(ans )
class Matrix:
    """simple docstring"""
    def __init__( self , matrix , w , h ):
        self.__matrix = matrix
        self.__width = w
        self.__height = h
    def __str__( self ):
        ans = ''
        for i in range(self.__height ):
            ans += "|"
            for j in range(self.__width ):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j] ) + ","
                else:
                    ans += str(self.__matrix[i][j] ) + "|\n"
        return ans
    def __add__( self , other ):
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height ):
                row = [
                    self.__matrix[i][j] + other.component(i , j )
                    for j in range(self.__width )
                ]
                matrix.append(row )
            return Matrix(matrix , self.__width , self.__height )
        else:
            raise Exception('matrix must have the same dimension!' )
    def __sub__( self , other ):
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height ):
                row = [
                    self.__matrix[i][j] - other.component(i , j )
                    for j in range(self.__width )
                ]
                matrix.append(row )
            return Matrix(matrix , self.__width , self.__height )
        else:
            raise Exception('matrices must have the same dimension!' )
    @overload
    def __mul__( self , other : float ) -> "Matrix":
        ...
    @overload
    def __mul__( self , other : Vector ) -> Vector:
        ...
    def __mul__( self , other ):
        if isinstance(other , Vector ):  # matrix-vector
            if len(other ) == self.__width:
                ans = zero_vector(self.__height )
                for i in range(self.__height ):
                    prods = [
                        self.__matrix[i][j] * other.component(j )
                        for j in range(self.__width )
                    ]
                    ans.change_component(i , sum(prods ) )
                return ans
            else:
                raise Exception(
                    'vector must have the same size as the '
                    'number of columns of the matrix!' )
        elif isinstance(other , (int, float) ):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width )]
                for i in range(self.__height )
            ]
            return Matrix(matrix , self.__width , self.__height )
        return None
    def height( self ):
        return self.__height
    def width( self ):
        return self.__width
    def component( self , x , y ):
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception('change_component: indices out of bounds' )
    def change_component( self , x , y , value ):
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception('change_component: indices out of bounds' )
    def minor( self , x , y ):
        if self.__height != self.__width:
            raise Exception('Matrix is not square' )
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor ) ):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor , self.__width - 1 , self.__height - 1 ).determinant()
    def cofactor( self , x , y ):
        if self.__height != self.__width:
            raise Exception('Matrix is not square' )
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x , y )
        else:
            raise Exception('Indices out of bounds' )
    def determinant( self ):
        if self.__height != self.__width:
            raise Exception('Matrix is not square' )
        if self.__height < 1:
            raise Exception('Matrix has no element' )
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0 , y ) for y in range(self.__width )
            ]
            return sum(cofactor_prods )
def square_zero_matrix ( n ) -> Matrix:
    ans: list[list[float]] = [[0] * n for _ in range(n )]
    return Matrix(ans , n , n )
def random_matrix ( width , height , a , b ) -> Matrix:
    random.seed(None )
    matrix: list[list[float]] = [
        [random.randint(a , b ) for _ in range(width )] for _ in range(height )
    ]
    return Matrix(matrix , width , height )
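# Quick sanity examples (illustrative values, not from the source):
#   Vector([1, 2, 3]) * Vector([1, 1, 1])      # dot product -> 6
#   str(unit_basis_vector(3, 1))               # -> "(0,1,0)"
#   square_zero_matrix(3).determinant()        # -> 0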
| 280
| 1
|
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
__snake_case = 3
def primitive_root ( p ) -> int:
    '''simple docstring'''
    print('Generating primitive root of p' )
    while True:
        g = random.randrange(3 , p )
        if pow(g , 2 , p ) == 1:
            continue
        if pow(g , p , p ) == 1:
            continue
        return g
def generate_key ( key_size ) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    '''simple docstring'''
    print('Generating prime p...' )
    p = rabin_miller.generate_large_prime(key_size )  # select large prime number.
    e_1 = primitive_root(p )  # one primitive root on modulo p.
    d = random.randrange(3 , p )  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1 , d , p ) , p )
    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)
    return public_key, private_key
def make_key_files ( name , key_size ) -> None:
    '''simple docstring'''
    if os.path.exists(F'{name}_pubkey.txt' ) or os.path.exists(F'{name}_privkey.txt' ):
        print('\nWARNING:' )
        print(
            F'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            'Use a different name or delete these files and re-run this program.' )
        sys.exit()
    public_key , private_key = generate_key(key_size )
    print(F'\nWriting public key to file {name}_pubkey.txt...' )
    with open(F'{name}_pubkey.txt' , 'w' ) as fo:
        fo.write(F'{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}' )
    print(F'Writing private key to file {name}_privkey.txt...' )
    with open(F'{name}_privkey.txt' , 'w' ) as fo:
        fo.write(F'{private_key[0]},{private_key[1]}' )
def main ( ) -> None:
    '''simple docstring'''
    print('Making key files...' )
    make_key_files('elgamal' , 2048 )
    print('Key files generation successful' )
if __name__ == "__main__":
main()
| 362
|
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
__snake_case = logging.get_logger(__name__)
class lowercase__ ( LayoutLMvaImageProcessor ):
    def __init__( self , *args , **kwargs ):
        warnings.warn(
            'The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use LayoutLMv2ImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 169
| 0
|
"""simple docstring"""
import math
class Graph :
    def __init__( self , n = 0 ) -> None:  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0 , n )] for i in range(0 , n )
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0 , n )] for i in range(0 , n )
        ]  # dp[i][j] stores minimum distance from i to j
    def add_edge( self , u , v , w ) -> None:
        self.dp[u][v] = w
    def floyd_warshall( self ) -> None:
        for k in range(0 , self.n ):
            for i in range(0 , self.n ):
                for j in range(0 , self.n ):
                    self.dp[i][j] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
    def show_min( self , u , v ) -> float:
        return self.dp[u][v]
if __name__ == "__main__":
    graph = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
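    # show_min returns the shortest-path weight rather than printing it; for this graph
    # graph.show_min(1, 4) is 11 (via 1 -> 3 -> 4) and graph.show_min(0, 3) is 16 (via 0 -> 2 -> 3).
    print(graph.show_min(1, 4), graph.show_min(0, 3))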
| 33
|
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
TEXT = '''
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.
In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]
On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
'''
class _UpperCAmelCase ( unittest.TestCase , ToolTesterMixin ):
    def setUp( self ) -> Dict:
        self.tool = load_tool('''text-question-answering''' )
        self.tool.setup()
        self.remote_tool = load_tool('''text-question-answering''' , remote=True )
    def test_exact_match_arg( self ) -> List[str]:
        result = self.tool(TEXT , '''What did Hugging Face do in April 2021?''' )
        self.assertEqual(result , '''launched the BigScience Research Workshop''' )
    def test_exact_match_arg_remote( self ) -> List[str]:
        result = self.remote_tool(TEXT , '''What did Hugging Face do in April 2021?''' )
        self.assertEqual(result , '''launched the BigScience Research Workshop''' )
    def test_exact_match_kwarg( self ) -> int:
        result = self.tool(text=TEXT , question='''What did Hugging Face do in April 2021?''' )
        self.assertEqual(result , '''launched the BigScience Research Workshop''' )
    def test_exact_match_kwarg_remote( self ) -> Optional[int]:
        result = self.remote_tool(text=TEXT , question='''What did Hugging Face do in April 2021?''' )
        self.assertEqual(result , '''launched the BigScience Research Workshop''' )
| 33
| 1
|
"""simple docstring"""
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ['XLA_PYTHON_CLIENT_ALLOCATOR'] = 'platform'
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ) -> Dict:
    '''simple docstring'''
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id , 1 , 0 )
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
class FlaxBlenderbotSmallModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size ), 3, self.vocab_size )
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64 )), -1 )
        decoder_input_ids = shift_tokens_right(input_ids, 1, 2 )
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, initializer_range=self.initializer_range, use_cache=False, )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids )
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config )
        encoder_outputs = model.encode(inputs_dict['input_ids'] )
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict['decoder_input_ids'],
            inputs_dict['decoder_attention_mask'],
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs )
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype='i4' )
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids, )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype='i4' )
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=outputs_cache.past_key_values, decoder_position_ids=decoder_position_ids, )
        outputs = model.decode(decoder_input_ids, encoder_outputs )
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1e-3, msg=f'''Max diff is {diff}''' )
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config )
        encoder_outputs = model.encode(inputs_dict['input_ids'] )
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict['decoder_input_ids'],
            inputs_dict['decoder_attention_mask'],
        )
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
            ], axis=-1, )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs )
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask_cache, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids, )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype='i4' )
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, past_key_values=outputs_cache.past_key_values, decoder_attention_mask=decoder_attention_mask_cache, decoder_position_ids=decoder_position_ids, )
        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask )
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1e-3, msg=f'''Max diff is {diff}''' )
@require_flax
class FlaxBlenderbotSmallHeadTests( unittest.TestCase ):
    vocab_size = 99

    def _get_config_and_data(self):
        input_ids = np.array(
            [
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
            ], dtype=np.int64, )
        batch_size = input_ids.shape[0]
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0, )
        return config, input_ids, batch_size
    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config )
        outputs = lm_model(input_ids=input_ids )
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs['logits'].shape, expected_shape )

    def test_lm_uneven_forward(self):
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size, d_model=14, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=8, decoder_ffn_dim=8, max_position_embeddings=48, )
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config )
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64 )
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64 )
        outputs = lm_model(input_ids=context, decoder_input_ids=summary )
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs['logits'].shape, expected_shape )

    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64 )
        shifted = shift_tokens_right(input_ids, 1, 2 )
        n_pad_before = np.equal(input_ids, 1 ).astype(np.float32 ).sum()
        n_pad_after = np.equal(shifted, 1 ).astype(np.float32 ).sum()
        self.assertEqual(shifted.shape, input_ids.shape )
        self.assertEqual(n_pad_after, n_pad_before - 1 )
        self.assertTrue(np.equal(shifted[:, 0], 2 ).all() )
@require_flax
class FlaxBlenderbotSmallModelTest( FlaxModelTesterMixin , unittest.TestCase , FlaxGenerationTesterMixin ):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotSmallModel,
            FlaxBlenderbotSmallForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxBlenderbotSmallModelTester(self )

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict )

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict )
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class )
                model = model_class(config )

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs ):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask )

                with self.subTest('JIT Enabled' ):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest('JIT Disabled' ):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(jitted_outputs ), len(outputs ) )
                for jitted_output, output in zip(jitted_outputs, outputs ):
                    self.assertEqual(jitted_output.shape, output.shape )
    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                model = model_class(config )
                encoder_outputs = model.encode(inputs_dict['input_ids'], inputs_dict['attention_mask'] )
                prepared_inputs_dict = {
                    'decoder_input_ids': inputs_dict['decoder_input_ids'],
                    'decoder_attention_mask': inputs_dict['decoder_attention_mask'],
                    'encoder_outputs': encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs ):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs, )

                with self.subTest('JIT Enabled' ):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest('JIT Disabled' ):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(jitted_outputs ), len(outputs ) )
                for jitted_output, output in zip(jitted_outputs, outputs ):
                    self.assertEqual(jitted_output.shape, output.shape )
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('facebook/blenderbot_small-90M' )
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1) ) * model.config.eos_token_id
            outputs = model(input_ids )
            self.assertIsNotNone(outputs )
| 53
|
"""simple docstring"""
import unittest
from transformers import DonutProcessor
DONUT_PRETRAINED_MODEL_NAME = 'naver-clova-ix/donut-base'
class DonutProcessorTest( unittest.TestCase ):
    def setUp(self):
        self.processor = DonutProcessor.from_pretrained(DONUT_PRETRAINED_MODEL_NAME )

    def test_token2json(self):
        expected_json = {
            'name': 'John Doe',
            'age': '99',
            'city': 'Atlanta',
            'state': 'GA',
            'zip': '30301',
            'phone': '123-4567',
            'nicknames': [{'nickname': 'Johnny'}, {'nickname': 'JD'}],
        }
        sequence = (
            '<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'
            '<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'
            '<s_nicknames><s_nickname>Johnny</s_nickname>'
            '<sep/><s_nickname>JD</s_nickname></s_nicknames>'
        )
        actual_json = self.processor.token2json(sequence )
        self.assertDictEqual(actual_json, expected_json )
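

# For orientation: the test above exercises the mapping from Donut's XML-like tag
# sequence to nested JSON. A deliberately simplified sketch of the idea, handling
# only flat <s_key>value</s_key> pairs; it is an illustration, not the library's
# actual token2json implementation, which also handles nesting, <sep/>-separated
# lists and special tokens.
import re


def simple_token2json(sequence: str) -> dict:
    return {key: value for key, value in re.findall(r"<s_(\w+)>(.*?)</s_\1>", sequence)}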
| 53
| 1
|
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/esm2_t6_8M_UR50D': 'https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt',
'facebook/esm2_t12_35M_UR50D': 'https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/esm2_t6_8M_UR50D': 1_0_2_4,
'facebook/esm2_t12_35M_UR50D': 1_0_2_4,
}
def load_vocab_file( vocab_file ) -> List[str]:
    '''simple docstring'''
    with open(vocab_file , "r" ) as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]
class lowercase_ ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__( self , vocab_file , unk_token="<unk>" , cls_token="<cls>" , pad_token="<pad>" , mask_token="<mask>" , eos_token="<eos>" , **kwargs , ):
        super().__init__(**kwargs )
        self.all_tokens = load_vocab_file(vocab_file )
        self._id_to_token = dict(enumerate(self.all_tokens ) )
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens )}
        self._unk_token = unk_token
        self._cls_token = cls_token
        self._pad_token = pad_token
        self._mask_token = mask_token
        self._eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens )

    def _convert_id_to_token( self , index ):
        return self._id_to_token.get(index , self.unk_token )

    def _convert_token_to_id( self , token ):
        return self._token_to_id.get(token , self._token_to_id.get(self.unk_token ) )

    def _tokenize( self , text , **kwargs ):
        return text.split()

    def get_vocab_size( self , with_added_tokens=False ):
        return len(self._id_to_token )

    def get_vocab( self ):
        return {token: i for i, token in enumerate(self.all_tokens )}

    def token_to_id( self , token ):
        return self._token_to_id.get(token , self._token_to_id.get(self.unk_token ) )

    def id_to_token( self , index ):
        return self._id_to_token.get(index , self.unk_token )

    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!" )
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token

    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model." )
            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0 )) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1 ) + [1]
        return mask

    def save_vocabulary( self , save_directory , filename_prefix ):
        vocab_file = os.path.join(save_directory , (filename_prefix + "-" if filename_prefix else "") + "vocab.txt" )
        with open(vocab_file , "w" ) as f:
            f.write("\n".join(self.all_tokens ) )
        return (vocab_file,)

    @property
    def vocab_size( self ):
        return self.get_vocab_size(with_added_tokens=False )

    def _add_tokens( self , new_tokens , special_tokens = False ):
        return super()._add_tokens(new_tokens , special_tokens=special_tokens )
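

# A minimal usage sketch for the tokenizer above, assuming a toy vocabulary file;
# the path and token list are made up for illustration (real ESM checkpoints ship
# their own vocab.txt), and exact output depends on the transformers version.
if __name__ == "__main__":
    with open("toy_vocab.txt", "w") as f:
        f.write("\n".join(["<cls>", "<pad>", "<eos>", "<unk>", "L", "A", "G", "<mask>"]))
    tokenizer = lowercase_("toy_vocab.txt")
    # whitespace tokenization; build_inputs_with_special_tokens adds <cls> ... <eos>
    print(tokenizer("L A G")["input_ids"])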
| 80
|
'''simple docstring'''
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float] , x: float ) -> float:
    return sum(c * (x**i) for i, c in enumerate(poly ) )


def horner(poly: Sequence[float] , x: float ) -> float:
    result = 0.0
    for coeff in reversed(poly ):
        result = result * x + coeff
    return result
if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
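
# Why Horner's rule is cheaper: it rewrites
#   p(x) = 0.0 + 0.0*x + 5.0*x**2 + 9.3*x**3 + 7.0*x**4
# as the nested form
#   p(x) = ((((7.0)*x + 9.3)*x + 5.0)*x + 0.0)*x + 0.0,
# needing only n multiplications and n additions instead of computing each power.
# A quick cross-check of both implementations on the demo data above:
assert abs(horner((0.0, 0.0, 5.0, 9.3, 7.0), 10.0) - evaluate_poly((0.0, 0.0, 5.0, 9.3, 7.0), 10.0)) < 1e-9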
| 41
| 0
|
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
_lowerCAmelCase :int = logging.get_logger(__name__)
class _UpperCAmelCase ( OwlViTImageProcessor ):
    '''simple docstring'''

    def __init__( self , *A , **A_ ) -> Optional[Any]:
        warnings.warn(
            '''The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use OwlViTImageProcessor instead.''' , FutureWarning , )
        super().__init__(*A , **A_ )
| 358
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_efficientnet': [
'EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EfficientNetConfig',
'EfficientNetOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_efficientnet'] = ['EfficientNetImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_efficientnet'] = [
'EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'EfficientNetForImageClassification',
'EfficientNetModel',
'EfficientNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
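

# This file follows the transformers lazy-import convention: _import_structure lists
# the public exports, heavy submodules are imported only under TYPE_CHECKING, and at
# runtime the module object is replaced by a _LazyModule that resolves attributes on
# first access. A simplified, self-contained sketch of that idea (the real _LazyModule
# also handles dependency checks and extra module attributes):
import importlib
import types


class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Maps exported attribute -> owning submodule; nothing is imported yet.
        self._class_to_module = {cls: mod for mod, classes in import_structure.items() for cls in classes}

    def __getattr__(self, attr):
        # Import the owning submodule lazily, on first attribute access.
        module = importlib.import_module("." + self._class_to_module[attr], self.__name__)
        return getattr(module, attr)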
| 68
| 0
|
"""simple docstring"""
from __future__ import annotations
import math
_a = '2020.9.26'
_a = 'xcodz-dot, cclaus, dhruvmanila'
def convert_to_ad(x: float, y: float, z: float, scale: float, distance: float) -> tuple[float, float]:
    '''simple docstring'''
    if not all(isinstance(val, (float, int)) for val in locals().values()):
        msg = F"""Input values must either be float or int: {list(locals().values())}"""
        raise TypeError(msg)
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y


def rotate(x: float, y: float, z: float, axis: str, angle: float) -> tuple[float, float, float]:
    '''simple docstring'''
    if not isinstance(axis, str):
        raise TypeError("Axis must be a str")
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val, (float, int)) for val in input_variables.values()):
        msg = (
            "Input values except axis must either be float or int: "
            F"""{list(input_variables.values())}"""
        )
        raise TypeError(msg)
    angle = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle) - y * math.sin(angle)
        new_y = y * math.cos(angle) + x * math.sin(angle)
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + y * math.sin(angle)
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + x * math.sin(angle)
        new_y = y
    else:
        raise ValueError("not a valid axis, choose one of 'x', 'y', 'z'")
    return new_x, new_y, new_z
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"{convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0) = }")
print(F"{rotate(1.0, 2.0, 3.0, 'y', 90.0) = }")
| 17
|
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
EN_CODE = 5_00_03
PYTHON_CODE = 5_00_02
@require_sentencepiece
@require_tokenizers
class PLBartTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = PLBartTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    def setUp( self ) -> None:
        """simple docstring"""
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB , language_codes='base' , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
    def test_full_base_tokenizer( self ) -> str:
        """simple docstring"""
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB , language_codes='base' , keep_accents=True )
        tokens = tokenizer.tokenize('This is a test' )
        self.assertListEqual(tokens , ['▁This', '▁is', '▁a', '▁t', 'est'] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
        end = tokenizer.vocab_size
        language_tokens = [tokenizer.convert_ids_to_tokens(x ) for x in range(end - 4 , end )]
        self.assertListEqual(language_tokens , ['__java__', '__python__', '__en_XX__', '<mask>'] )
        code = 'java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'
        input_ids = tokenizer(code ).input_ids
        self.assertEqual(
            tokenizer.decode(input_ids , skip_special_tokens=True , clean_up_tokenization_spaces=False ) , code , )
    def test_full_multi_tokenizer( self ) -> Tuple:
        """simple docstring"""
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB , language_codes='multi' , keep_accents=True )
        tokens = tokenizer.tokenize('This is a test' )
        self.assertListEqual(tokens , ['▁This', '▁is', '▁a', '▁t', 'est'] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
        end = tokenizer.vocab_size
        language_tokens = [tokenizer.convert_ids_to_tokens(x ) for x in range(end - 7 , end )]
        self.assertListEqual(
            language_tokens , ['__java__', '__python__', '__en_XX__', '__javascript__', '__php__', '__ruby__', '__go__'] )
        code = 'java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'
        input_ids = tokenizer(code ).input_ids
        self.assertEqual(
            tokenizer.decode(input_ids , skip_special_tokens=True , clean_up_tokenization_spaces=False ) , code , )
@require_torch
@require_sentencepiece
@require_tokenizers
class PLBartPythonEnIntegrationTest( unittest.TestCase ):
    checkpoint_name = 'uclanlp/plbart-python-en_XX'
    src_text = [
'def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])',
'def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])',
]
    tgt_text = [
'Returns the maximum value of a b c.',
'Sums the values of a b c.',
]
    expected_src_tokens = [
134,
5452,
33460,
33441,
33463,
33465,
33463,
33449,
988,
20,
33456,
19,
33456,
771,
39,
4258,
889,
3318,
33441,
33463,
33465,
33463,
33449,
2471,
2,
PYTHON_CODE,
]
@classmethod
    def setUpClass( cls ) -> Optional[Any]:
        """simple docstring"""
        cls.tokenizer: PLBartTokenizer = PLBartTokenizer.from_pretrained(
            cls.checkpoint_name , language_codes='base' , src_lang='python' , tgt_lang='en_XX' )
        cls.pad_token_id = 1
        return cls
    def test_language_codes( self ) -> Any:
"""simple docstring"""
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['__java__'] , 5_0001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['__python__'] , 5_0002 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['__en_XX__'] , 5_0003 )
    def test_tokenizer_batch_encode_plus( self ) -> str:
        """simple docstring"""
        ids = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens , ids )
    def test_tokenizer_decode_ignores_language_codes( self ) -> Tuple:
        """simple docstring"""
        self.assertIn(EN_CODE , self.tokenizer.all_special_ids )
        generated_ids = [EN_CODE, 9037, 3_3442, 57, 752, 153, 14, 56, 18, 9, 2]
        result = self.tokenizer.decode(generated_ids , skip_special_tokens=True )
        expected_english = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=True )
        self.assertEqual(result , expected_english )
        self.assertNotIn(self.tokenizer.eos_token , result )
    def test_truncation( self ) -> str:
        """simple docstring"""
        src_text = ['def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])' * 20]
        self.assertIsInstance(src_text[0] , str )
        desired_max_length = 10
        ids = self.tokenizer(src_text , max_length=desired_max_length , truncation=True ).input_ids[0]
        self.assertEqual(ids[-2] , 2 )
        self.assertEqual(ids[-1] , PYTHON_CODE )
        self.assertEqual(len(ids ) , desired_max_length )
    def test_mask_token( self ) -> Optional[int]:
"""simple docstring"""
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', '__java__'] ) , [5_0004, 5_0001] )
    def test_special_tokens_unaffected_by_save_load( self ) -> str:
        """simple docstring"""
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname )
        new_tok = PLBartTokenizer.from_pretrained(tmpdirname )
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids , original_special_tokens )
@require_torch
    def test_batch_fairseq_parity( self ) -> Dict:
        """simple docstring"""
        batch = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=True , return_tensors='pt' )
        batch['decoder_input_ids'] = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
        self.assertEqual(batch.decoder_input_ids[1][0] , EN_CODE )
        self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
        self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
@require_torch
    def test_seq2seq_max_length( self ) -> Optional[int]:
        """simple docstring"""
        batch = self.tokenizer(
            self.src_text , text_target=self.tgt_text , padding=True , truncation=True , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , )
        batch['decoder_input_ids'] = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
        self.assertIsInstance(batch , BatchEncoding )
        self.assertEqual((2, 26) , batch.input_ids.shape )
        self.assertEqual((2, 26) , batch.attention_mask.shape )
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens , result )
        self.assertEqual(2 , batch.decoder_input_ids[0, -1] )  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens , [] )
        self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
    def test_as_target_tokenizer( self ) -> Union[str, Any]:
        """simple docstring"""
        batch = self.tokenizer(self.src_text , padding=True , truncation=True , max_length=3 , return_tensors='pt' )
        targets = self.tokenizer(
            text_target=self.tgt_text , padding=True , truncation=True , max_length=10 , return_tensors='pt' )
        labels = targets['input_ids']
        batch['decoder_input_ids'] = shift_tokens_right(labels , self.tokenizer.pad_token_id )
        self.assertEqual(batch.input_ids.shape[1] , 3 )
        self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
    def test_tokenizer_translation( self ) -> Any:
        """simple docstring"""
        inputs = self.tokenizer._build_translation_inputs(
            'A test' , return_tensors='pt' , src_lang='en_XX' , tgt_lang='java' )
        self.assertEqual(
            nested_simplify(inputs ) , {
                # A, test, EOS, en_XX
                'input_ids': [[150, 242, 2, 5_0003]],
                'attention_mask': [[1, 1, 1, 1]],
                # java
                'forced_bos_token_id': 5_0001,
            } , )
| 199
| 0
|
'''simple docstring'''
class RadixNode:
    '''simple docstring'''

    def __init__( self , prefix = "" , is_leaf = False ) -> None:
        # Mapping from the first character of the prefix of the node
        self.nodes = {}
        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf
        self.prefix = prefix

    def match( self , word ) -> tuple[str, str, str]:
        x = 0
        for q, w in zip(self.prefix , word ):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]

    def insert_many( self , words ) -> None:
        for word in words:
            self.insert(word )

    def insert( self , word ) -> None:
        # Case 1: If the word is the prefix of the node
        # Solution: We set the current node as leaf
        if self.prefix == word:
            self.is_leaf = True
        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word , is_leaf=True )
        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word )
            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word )
            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix
                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string , False )
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node
                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word )

    def find( self , word ) -> bool:
        incoming_node = self.nodes.get(word[0] , None )
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word )
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word )

    def delete( self , word ) -> bool:
        incoming_node = self.nodes.get(word[0] , None )
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word )
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word )
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes ) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes ) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values() )[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes ) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values() )[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes
            return True

    def print_tree( self , height = 0 ) -> None:
        if self.prefix != "":
            print('''-''' * height , self.prefix , ''' (leaf)''' if self.is_leaf else '''''' )
        for value in self.nodes.values():
            value.print_tree(height + 1 )
def test_trie():
    words = '''banana bananas bandana band apple all beast'''.split()
    root = RadixNode()
    root.insert_many(words )
    assert all(root.find(word ) for word in words )
    assert not root.find('''bandanas''' )
    assert not root.find('''apps''' )
    root.delete('''all''' )
    assert not root.find('''all''' )
    root.delete('''banana''' )
    assert not root.find('''banana''' )
    assert root.find('''bananas''' )
    return True


def pytests():
    assert test_trie()


def main():
    root = RadixNode()
    words = '''banana bananas bandanas bandana band apple all beast'''.split()
    root.insert_many(words )
    print('''Words:''' , words )
    print('''Tree:''' )
    root.print_tree()
if __name__ == "__main__":
main()
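

# One subtlety worth seeing in isolation: deleting a word can merge a now-redundant
# internal node with its single child, keeping prefixes maximally compressed.
# A small demo with made-up words, guarded so it only runs as a script:
if __name__ == "__main__":
    tree = RadixNode()
    tree.insert_many(["test", "tester"])
    tree.delete("test")  # the "test" node and its "er" child collapse into one "tester" node
    assert tree.find("tester") and not tree.find("test")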
| 214
|
'''simple docstring'''
class Graph:
    '''simple docstring'''

    def __init__( self ) -> None:
        self.vertex = {}

    def print_graph( self ) -> None:
        print(self.vertex )
        for i in self.vertex:
            print(i , ''' -> ''' , ''' -> '''.join([str(j ) for j in self.vertex[i]] ) )

    def add_edge( self , from_vertex , to_vertex ) -> None:
        # check if vertex is already present,
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex )
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs( self ) -> None:
        # visited array for storing already visited nodes
        visited = [False] * len(self.vertex )
        # call the recursive helper function
        for i in range(len(self.vertex ) ):
            if not visited[i]:
                self.dfs_recursive(i , visited )

    def dfs_recursive( self , start_vertex , visited ) -> None:
        # mark start vertex as visited
        visited[start_vertex] = True
        print(start_vertex , end=''' ''' )
        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex[start_vertex]:
            if not visited[i]:
                self.dfs_recursive(i , visited )


if __name__ == "__main__":
    g = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print("""DFS:""")
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
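

# The same traversal without recursion, using an explicit stack; a minimal sketch
# against the Graph above (visit order can differ from the recursive version
# depending on push order, hence the reversed() below).
def dfs_iterative(graph):
    visited = [False] * len(graph.vertex)
    for start in range(len(graph.vertex)):
        if visited[start]:
            continue
        stack = [start]
        while stack:
            vertex = stack.pop()
            if visited[vertex]:
                continue
            visited[vertex] = True
            print(vertex, end=" ")
            # push neighbours in reverse so the lowest-numbered neighbour is popped first
            for neighbour in reversed(graph.vertex.get(vertex, [])):
                if not visited[neighbour]:
                    stack.append(neighbour)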
| 214
| 1
|
"""simple docstring"""
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
    '''simple docstring'''

    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=8,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=5,
        num_attention_heads=2,
        intermediate_size=36,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
            choice_labels = ids_tensor([self.batch_size], self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        '''simple docstring'''
        return MraConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )

    def get_pipeline_config(self):
        '''simple docstring'''
        config = self.get_config()
        config.vocab_size = 300
        return config
    def prepare_config_and_inputs_for_decoder(self):
        '''simple docstring'''
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2 )
        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        model = MraModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids )
        result = model(input_ids, token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        '''simple docstring'''
        config.add_cross_attention = True
        model = MraModel(config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, )
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        model = MraForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        model = MraForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class MraModelTest( ModelTesterMixin , unittest.TestCase ):
    '''simple docstring'''

    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()
    def setUp(self):
        '''simple docstring'''
        self.model_tester = MraModelTester(self )
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37 )

    def test_config(self):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def test_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_model_various_embeddings(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )

    def test_for_masked_lm(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )

    def test_for_multiple_choice(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )

    def test_for_question_answering(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )

    def test_for_sequence_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )

    def test_for_token_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )

    @slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name )
            self.assertIsNotNone(model )

    @unittest.skip(reason="""MRA does not output attentions""" )
    def test_attention_outputs(self):
        '''simple docstring'''
        return
@require_torch
class MraModelIntegrationTest( unittest.TestCase ):
    '''simple docstring'''

    @slow
    def test_inference_no_head(self):
        '''simple docstring'''
        model = MraModel.from_pretrained("""uw-madison/mra-base-512-4""" )
        input_ids = torch.arange(256 ).unsqueeze(0 )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size((1, 256, 768) )
        self.assertEqual(output.shape, expected_shape )
        expected_slice = torch.tensor(
            [[[-0.0_140, 0.0_830, -0.0_381], [0.1_546, 0.1_402, 0.0_220], [0.1_162, 0.0_851, 0.0_165]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1E-4 ) )

    @slow
    def test_inference_masked_lm(self):
        '''simple docstring'''
        model = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-512-4""" )
        input_ids = torch.arange(256 ).unsqueeze(0 )
        with torch.no_grad():
            output = model(input_ids )[0]
        vocab_size = 50_265
        expected_shape = torch.Size((1, 256, vocab_size) )
        self.assertEqual(output.shape, expected_shape )
        expected_slice = torch.tensor(
            [[[9.2_595, -3.6_038, 11.8_819], [9.3_869, -3.2_693, 11.0_956], [11.8_524, -3.4_938, 13.1_210]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1E-4 ) )

    @slow
    def test_inference_masked_lm_long_input(self):
        '''simple docstring'''
        model = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-4096-8-d3""" )
        input_ids = torch.arange(4_096 ).unsqueeze(0 )
        with torch.no_grad():
            output = model(input_ids )[0]
        vocab_size = 50_265
        expected_shape = torch.Size((1, 4_096, vocab_size) )
        self.assertEqual(output.shape, expected_shape )
        expected_slice = torch.tensor(
            [[[5.4_789, -2.3_564, 7.5_064], [7.9_067, -1.3_369, 9.9_668], [9.0_712, -1.8_106, 7.0_380]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1E-4 ) )
| 64
|
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def make_dataset() -> tuple[list[int], int]:
    """simple docstring"""
    arr = [randint(-1000 , 1000 ) for i in range(10 )]
    r = randint(-5000 , 5000 )
    return (arr, r)


dataset = make_dataset()


def triplet_sum1(arr: list[int] , target: int ) -> tuple[int, ...]:
    """simple docstring"""
    for triplet in permutations(arr , 3 ):
        if sum(triplet ) == target:
            return tuple(sorted(triplet ) )
    return (0, 0, 0)


def triplet_sum2(arr: list[int] , target: int ) -> tuple[int, int, int]:
    """simple docstring"""
    arr.sort()
    n = len(arr )
    for i in range(n - 1 ):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)


def solution_times() -> tuple[float, float]:
    """simple docstring"""
    setup_code = "\nfrom __main__ import dataset, triplet_sum1, triplet_sum2\n"
    test_code1 = "\ntriplet_sum1(*dataset)\n"
    test_code2 = "\ntriplet_sum2(*dataset)\n"
    times1 = repeat(setup=setup_code , stmt=test_code1 , repeat=5 , number=1_0000 )
    times2 = repeat(setup=setup_code , stmt=test_code2 , repeat=5 , number=1_0000 )
    return (min(times1 ), min(times2 ))


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    times = solution_times()
print(F'''The time for naive implementation is {times[0]}.''')
print(F'''The time for optimized implementation is {times[1]}.''')
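

# Why the timing gap is expected: triplet_sum1 scans all 3-permutations (O(n**3)
# candidate tuples for n elements), while triplet_sum2 sorts once and then moves
# two pointers inward, which is O(n**2) after the O(n log n) sort. A fixed-input
# sanity check (values chosen for illustration) that both agree:
assert triplet_sum1([1, 5, 8, 12, 30], 25) == triplet_sum2([1, 5, 8, 12, 30], 25) == (5, 8, 12)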
| 169
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_resnet"] = [
"RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"ResNetForImageClassification",
"ResNetModel",
"ResNetPreTrainedModel",
"ResNetBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_resnet"] = [
"TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFResNetForImageClassification",
"TFResNetModel",
"TFResNetPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_resnet"] = [
"FlaxResNetForImageClassification",
"FlaxResNetModel",
"FlaxResNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
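# Added note (an explanatory sketch, not part of the original module): with the
# lazy module installed in sys.modules, a statement such as
#   from transformers.models.resnet import ResNetModel
# only imports modeling_resnet.py on first access, keeping `import transformers` cheap.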
| 272
|
"""simple docstring"""
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
_lowercase : Tuple = logging.getLogger(__name__)
def load_and_quantize_model( model: torch.nn.Module , bnb_quantization_config: BnbQuantizationConfig , weights_location: Union[str, os.PathLike] = None , device_map: Optional[Dict[str, Union[int, str, torch.device]]] = None , no_split_module_classes: Optional[List[str]] = None , max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None , offload_folder: Optional[Union[str, os.PathLike]] = None , offload_state_dict: bool = False , ):
"""simple docstring"""
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit
    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            '''You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'''
            ''' make sure you have the latest version of `bitsandbytes` installed.''' )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            '''You have a version of `bitsandbytes` that is not compatible with 4bit quantization,'''
            '''make sure you have the latest version of `bitsandbytes` installed.''' )
    modules_on_cpu = []
    # custom device map
    if isinstance(device_map , dict ) and len(device_map.keys() ) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ['''disk''', '''cpu''']]
    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model )
    # add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
        bnb_quantization_config.skip_modules.extend(modules_on_cpu )
    modules_to_not_convert = bnb_quantization_config.skip_modules
    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        bnb_quantization_config.keep_in_fp32_modules = []
    keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
    modules_to_not_convert.extend(keep_in_fp32_modules )
    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit
    model_device = get_parameter_device(model )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
'''It is not recommended to quantize a loaded model. '''
'''The model should be instantiated under the `init_empty_weights` context manager.''' )
        model = replace_with_bnb_layers(model , bnb_quantization_config , modules_to_not_convert=modules_to_not_convert )
        # convert param to the right dtype
        dtype = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules ):
                param.to(torch.float32 )
                if param.dtype != torch.float32:
                    param_name = name.replace('''.weight''' , '''''' ).replace('''.bias''' , '''''' )
                    param = getattr(model , param_name , None )
                    if param is not None:
                        param.to(torch.float32 )
            elif torch.is_floating_point(param ):
                param.to(dtype )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' )
logger.info(
f'''The model device type is {model_device.type}. However, cuda is needed for quantization.'''
'''We move the model to cuda.''' )
return model
elif weights_location is None:
raise RuntimeError(
f'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''' )
    else:
        with init_empty_weights():
            model = replace_with_bnb_layers(
                model , bnb_quantization_config , modules_to_not_convert=modules_to_not_convert )
        device_map = get_quantized_model_device_map(
            model , bnb_quantization_config , device_map , max_memory=max_memory , no_split_module_classes=no_split_module_classes , )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True
        offload = any(x in list(device_map.values() ) for x in ['''cpu''', '''disk'''] )
        load_checkpoint_in_model(
            model , weights_location , device_map , dtype=bnb_quantization_config.torch_dtype , offload_folder=offload_folder , offload_state_dict=offload_state_dict , keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules , offload_8bit_bnb=load_in_8bit and offload , )
        return dispatch_model(model , device_map=device_map , offload_dir=offload_folder )
def get_quantized_model_device_map( model , bnb_quantization_config , device_map=None , max_memory=None , no_split_module_classes=None ):
"""simple docstring"""
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {'''''': torch.cuda.current_device()}
        else:
            raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' )
        logger.info('''The device_map was not initialized.''' '''Setting device_map to `{\'\':torch.cuda.current_device()}`.''' )
    if isinstance(device_map , str ):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                '''If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or '''
                '''\'sequential\'.''' )
        special_dtypes = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
        special_dtypes.update(
            {
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules )
            } )
        kwargs = {}
        kwargs['''special_dtypes'''] = special_dtypes
        kwargs['''no_split_module_classes'''] = no_split_module_classes
        kwargs['''dtype'''] = bnb_quantization_config.target_dtype
        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model , low_zero=(device_map == '''balanced_low_0''') , max_memory=max_memory , **kwargs , )
        kwargs['''max_memory'''] = max_memory
        device_map = infer_auto_device_map(model , **kwargs )
    if isinstance(device_map , dict ):
        # check if don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules
        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
raise ValueError(
'''
Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
these modules in `torch_dtype`, you need to pass a custom `device_map` to
`load_and_quantize_model`. Check
https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
for more details.
''' )
                else:
                    logger.info(
                        '''Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit''' )
del device_map_without_some_modules
return device_map
def replace_with_bnb_layers( model , bnb_quantization_config , modules_to_not_convert=None , current_key_name=None ):
    """simple docstring"""
    if modules_to_not_convert is None:
        modules_to_not_convert = []
    model, has_been_replaced = _replace_with_bnb_layers(
        model , bnb_quantization_config , modules_to_not_convert , current_key_name )
if not has_been_replaced:
logger.warning(
'''You are loading your model in 8bit or 4bit but no linear modules were found in your model.'''
''' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.'''
''' Please double check your model architecture, or submit an issue on github if you think this is'''
''' a bug.''' )
return model
def _replace_with_bnb_layers( model , bnb_quantization_config , modules_to_not_convert=None , current_key_name=None , ):
    """simple docstring"""
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name )
        if isinstance(module , nn.Linear ) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = '''.'''.join(current_key_name )
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace ``nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features , module.out_features , module.bias is not None , has_fp16_weights=False , threshold=bnb_quantization_config.llm_int8_threshold , )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_4bit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant , quant_type=bnb_quantization_config.bnb_4bit_quant_type , )
                else:
                    raise ValueError('''load_in_8bit and load_in_4bit can\'t be both False''' )
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False )
                setattr(model , name , bnb_module )
                has_been_replaced = True
        if len(list(module.children() ) ) > 0:
            _, _has_been_replaced = _replace_with_bnb_layers(
                module , bnb_quantization_config , modules_to_not_convert , current_key_name )
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1 )
    return model, has_been_replaced
def get_keys_to_not_convert( model ):
    """simple docstring"""
    # Create a copy of the model
    with init_empty_weights():
        tied_model = deepcopy(model )  # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_params = find_tied_parameters(tied_model )
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params , dict ):
        tied_keys = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
    else:
        tied_keys = sum(tied_params , [] )
    has_tied_params = len(tied_keys ) > 0
    # Check if it is a base model
    is_base_model = False
    if hasattr(model , '''base_model_prefix''' ):
        is_base_model = not hasattr(model , model.base_model_prefix )
    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []
    # otherwise they have an attached head
    list_modules = list(model.named_children() )
    list_last_module = [list_modules[-1][0]]
    # add last module together with tied weights
    intersection = set(list_last_module ) - set(tied_keys )
    list_untouched = list(set(tied_keys ) ) + list(intersection )
    # remove ".weight" from the keys
    names_to_remove = ['''.weight''', '''.bias''']
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove , '''''' )
        filtered_module_names.append(name )
    return filtered_module_names
def has_4bit_bnb_layers( model ):
    """simple docstring"""
    for m in model.modules():
        if isinstance(m , bnb.nn.Linear4bit ):
            return True
    return False
def get_parameter_device( parameter: nn.Module ):
    """simple docstring"""
    return next(parameter.parameters() ).device
def quantize_and_offload_8bit( model , param , param_name , new_dtype , offload_folder , offload_index , fp16_statistics ):
    """simple docstring"""
    # if it is not quantized, we quantize and offload the quantized weights and the SCB stats
    if fp16_statistics is None:
        set_module_tensor_to_device(model , param_name , 0 , dtype=new_dtype , value=param )
        tensor_name = param_name
        module = model
        if "." in tensor_name:
            splits = tensor_name.split('''.''' )
            for split in splits[:-1]:
                new_module = getattr(module , split )
                if new_module is None:
                    raise ValueError(f'''{module} has no attribute {split}.''' )
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name] , param_name , offload_folder , index=offload_index )
        if hasattr(module._parameters[tensor_name] , '''SCB''' ):
            offload_weight(
                module._parameters[tensor_name].SCB , param_name.replace('''weight''' , '''SCB''' ) , offload_folder , index=offload_index , )
    else:
        offload_weight(param , param_name , offload_folder , index=offload_index )
        offload_weight(fp16_statistics , param_name.replace('''weight''' , '''SCB''' ) , offload_folder , index=offload_index )
    set_module_tensor_to_device(model , param_name , '''meta''' , dtype=new_dtype , value=torch.empty(*param.size() ) )
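# Added usage sketch (illustrative; the model class and checkpoint path below
# are hypothetical placeholders, and the config fields assume the
# BnbQuantizationConfig dataclass imported above):
#
#   bnb_config = BnbQuantizationConfig(load_in_8bit=True)
#   with init_empty_weights():
#       empty_model = SomeTorchModel()  # hypothetical nn.Module
#   model = load_and_quantize_model(
#       empty_model, bnb_config, weights_location="path/to/checkpoint", device_map="auto"
#   )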
| 272
| 1
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
a__ : Optional[Any] =logging.get_logger(__name__)
class DPTFeatureExtractor( DPTImageProcessor ):
"""simple docstring"""
    def __init__( self , *args , **kwargs ):
        warnings.warn(
            'The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use DPTImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 53
|
'''simple docstring'''
from __future__ import annotations
class snake_case :
"""simple docstring"""
    def __init__( self , rows : list[list[int]] ):
        error = TypeError(
            'Matrices must be formed from a list of zero or more lists containing at '
            'least one and the same number of values, each of which must be of type '
            'int or float.' )
        if len(rows ) != 0:
            cols = len(rows[0] )
            if cols == 0:
                raise error
            for row in rows:
                if len(row ) != cols:
                    raise error
                for value in row:
                    if not isinstance(value , (int, float) ):
                        raise error
            self.rows = rows
        else:
            self.rows = []
    def columns( self ):
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )]
    @property
    def num_rows( self ):
        return len(self.rows )
    @property
    def num_columns( self ):
        return len(self.rows[0] )
    @property
    def order( self ):
        return (self.num_rows, self.num_columns)
    @property
    def is_square( self ):
        return self.order[0] == self.order[1]
    def identity( self ):
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows )]
            for row_num in range(self.num_rows )
        ]
        return Matrix(values )
    def determinant( self ):
if not self.is_square:
return 0
if self.order == (0, 0):
return 1
if self.order == (1, 1):
return int(self.rows[0][0] )
if self.order == (2, 2):
return int(
(self.rows[0][0] * self.rows[1][1])
- (self.rows[0][1] * self.rows[1][0]) )
else:
return sum(
self.rows[0][column] * self.cofactors().rows[0][column]
for column in range(self.num_columns ) )
    def is_invertable( self ):
        return bool(self.determinant() )
    def get_minor( self , row : int , column : int ):
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns )
                if other_column != column
            ]
            for other_row in range(self.num_rows )
            if other_row != row
        ]
        return Matrix(values ).determinant()
    def cofactor( self , row : int , column : int ):
        if (row + column) % 2 == 0:
            return self.get_minor(row , column )
        return -1 * self.get_minor(row , column )
    def minors( self ):
        return Matrix(
            [
                [self.get_minor(row , column ) for column in range(self.num_columns )]
                for row in range(self.num_rows )
            ] )
    def cofactors( self ):
return Matrix(
[
[
self.minors().rows[row][column]
if (row + column) % 2 == 0
else self.minors().rows[row][column] * -1
for column in range(self.minors().num_columns )
]
for row in range(self.minors().num_rows )
] )
    def adjugate( self ):
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns )]
            for row in range(self.num_rows )
        ]
        return Matrix(values )
    def inverse( self ):
        determinant = self.determinant()
        if not determinant:
            raise TypeError('Only matrices with a non-zero determinant have an inverse' )
        return self.adjugate() * (1 / determinant)
def __repr__( self : Optional[Any] ):
return str(self.rows )
def __str__( self : Union[str, Any] ):
if self.num_rows == 0:
return "[]"
if self.num_rows == 1:
return "[[" + ". ".join(str(self.rows[0] ) ) + "]]"
return (
"["
+ "\n ".join(
[
'[' + '. '.join([str(__A ) for value in row] ) + '.]'
for row in self.rows
] )
+ "]"
)
    def add_row( self , row : list[int] , position : int | None = None ):
        type_error = TypeError('Row must be a list containing all ints and/or floats' )
        if not isinstance(row , list ):
            raise type_error
        for value in row:
            if not isinstance(value , (int, float) ):
                raise type_error
        if len(row ) != self.num_columns:
            raise ValueError(
                'Row must be equal in length to the other rows in the matrix' )
        if position is None:
            self.rows.append(row )
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]
    def add_column( self , column : list[int] , position : int | None = None ):
        type_error = TypeError(
            'Column must be a list containing all ints and/or floats' )
        if not isinstance(column , list ):
            raise type_error
        for value in column:
            if not isinstance(value , (int, float) ):
                raise type_error
        if len(column ) != self.num_rows:
            raise ValueError(
                'Column must be equal in length to the other columns in the matrix' )
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows )]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows )
            ]
    def __eq__( self , other : object ):
        if not isinstance(other , Matrix ):
            return NotImplemented
        return self.rows == other.rows
    def __ne__( self , other : object ):
        return not self == other
def __neg__( self : List[Any] ):
return self * -1
    def __add__( self , other : Matrix ):
if self.order != other.order:
raise ValueError('Addition requires matrices of the same order' )
return Matrix(
[
[self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
    def __sub__( self , other : Matrix ):
if self.order != other.order:
raise ValueError('Subtraction requires matrices of the same order' )
return Matrix(
[
[self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
    def __mul__( self , other : Matrix | int | float ):
        if isinstance(other , (int, float) ):
            return Matrix(
                [[int(element * other ) for element in row] for row in self.rows] )
        elif isinstance(other , Matrix ):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    'The number of columns in the first matrix must '
                    'be equal to the number of rows in the second' )
            return Matrix(
                [
                    [Matrix.dot_product(row , column ) for column in other.columns()]
                    for row in self.rows
                ] )
else:
raise TypeError(
'A Matrix can only be multiplied by an int, float, or another matrix' )
    def __pow__( self , other : int ):
        if not isinstance(other , int ):
            raise TypeError('A Matrix can only be raised to the power of an int' )
        if not self.is_square:
            raise ValueError('Only square matrices can be raised to a power' )
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                'Only invertable matrices can be raised to a negative power' )
        result = self
        for _ in range(other - 1 ):
            result *= self
        return result
    @classmethod
    def dot_product( cls , row : list[int] , column : list[int] ):
        return sum(row[i] * column[i] for i in range(len(row ) ) )
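# Added sanity check (illustrative, independent of the doctests below): the 2x2
# determinant is ad - bc, and get_minor(0, 0) drops row 0 and column 0.
assert Matrix([[1, 2], [3, 4]]).determinant() == -2
assert Matrix([[1, 2], [3, 4]]).get_minor(0, 0) == 4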
if __name__ == "__main__":
import doctest
doctest.testmod()
| 53
| 1
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"],
"processing_mgp_str": ["MgpstrProcessor"],
"tokenization_mgp_str": ["MgpstrTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mgp_str"] = [
"MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST",
"MgpstrModel",
"MgpstrPreTrainedModel",
"MgpstrForSceneTextRecognition",
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 357
|
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, T5EncoderModel
from diffusers import DDPMScheduler, UNet2DConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class IFPipelineTesterMixin:
    def _get_dummy_components(self):
        """simple docstring"""
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5')
        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5')
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, layers_per_block=1, block_out_channels=[32, 64], down_block_types=[
                'ResnetDownsampleBlock2D',
                'SimpleCrossAttnDownBlock2D',
            ], mid_block_type='UNetMidBlock2DSimpleCrossAttn', up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'], in_channels=3, out_channels=6, cross_attention_dim=32, encoder_hid_dim=32, attention_head_dim=8, addition_embed_type='text', addition_embed_type_num_heads=2, cross_attention_norm='group_norm', resnet_time_scale_shift='scale_shift', act_fn='gelu', )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests
        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=10_00, beta_schedule='squaredcos_cap_v2', beta_start=0.0_0_0_1, beta_end=0.0_2, thresholding=True, dynamic_thresholding_ratio=0.9_5, sample_max_value=1.0, prediction_type='epsilon', variance_type='learned_range', )
        torch.manual_seed(0)
        watermarker = IFWatermarker()
        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }
    def _get_superresolution_dummy_components(self):
        """simple docstring"""
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5')
        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5')
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, layers_per_block=[1, 2], block_out_channels=[32, 64], down_block_types=[
                'ResnetDownsampleBlock2D',
                'SimpleCrossAttnDownBlock2D',
            ], mid_block_type='UNetMidBlock2DSimpleCrossAttn', up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'], in_channels=6, out_channels=6, cross_attention_dim=32, encoder_hid_dim=32, attention_head_dim=8, addition_embed_type='text', addition_embed_type_num_heads=2, cross_attention_norm='group_norm', resnet_time_scale_shift='scale_shift', act_fn='gelu', class_embed_type='timestep', mid_block_scale_factor=1.4_1_4, time_embedding_act_fn='gelu', time_embedding_dim=32, )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests
        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=10_00, beta_schedule='squaredcos_cap_v2', beta_start=0.0_0_0_1, beta_end=0.0_2, thresholding=True, dynamic_thresholding_ratio=0.9_5, sample_max_value=1.0, prediction_type='epsilon', variance_type='learned_range', )
        torch.manual_seed(0)
        image_noising_scheduler = DDPMScheduler(
            num_train_timesteps=10_00, beta_schedule='squaredcos_cap_v2', beta_start=0.0_0_0_1, beta_end=0.0_2, )
        torch.manual_seed(0)
        watermarker = IFWatermarker()
        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "image_noising_scheduler": image_noising_scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }
    def _test_save_load_optional_components(self):
        """simple docstring"""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        prompt = inputs['prompt']
        generator = inputs['generator']
        num_inference_steps = inputs['num_inference_steps']
        output_type = inputs['output_type']
        if "image" in inputs:
            image = inputs['image']
        else:
            image = None
        if "mask_image" in inputs:
            mask_image = inputs['mask_image']
        else:
            mask_image = None
        if "original_image" in inputs:
            original_image = inputs['original_image']
        else:
            original_image = None
        prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(prompt)
        # inputs with prompt converted to embeddings
        inputs = {
            'prompt_embeds': prompt_embeds,
            'negative_prompt_embeds': negative_prompt_embeds,
            'generator': generator,
            'num_inference_steps': num_inference_steps,
            'output_type': output_type,
        }
        if image is not None:
            inputs['image'] = image
        if mask_image is not None:
            inputs['mask_image'] = mask_image
        if original_image is not None:
            inputs['original_image'] = original_image
        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)
        output = pipe(**inputs)[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
        pipe_loaded.to(torch_device)
        pipe_loaded.set_progress_bar_config(disable=None)
        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests
        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None, F'''`{optional_component}` did not stay set to None after loading.''', )
        inputs = self.get_dummy_inputs(torch_device)
        generator = inputs['generator']
        num_inference_steps = inputs['num_inference_steps']
        output_type = inputs['output_type']
        # inputs with prompt converted to embeddings
        inputs = {
            'prompt_embeds': prompt_embeds,
            'negative_prompt_embeds': negative_prompt_embeds,
            'generator': generator,
            'num_inference_steps': num_inference_steps,
            'output_type': output_type,
        }
        if image is not None:
            inputs['image'] = image
        if mask_image is not None:
            inputs['mask_image'] = mask_image
        if original_image is not None:
            inputs['original_image'] = original_image
        output_loaded = pipe_loaded(**inputs)[0]
        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1E-4)
    def _test_save_load_local(self):
        """simple docstring"""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
        pipe_loaded.to(torch_device)
        pipe_loaded.set_progress_bar_config(disable=None)
        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests
        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]
        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1E-4)
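    # Added note: both round-trip tests above depend on AttnAddedKVProcessor and
    # the repeated torch.manual_seed(0) calls in the dummy components, so that a
    # saved-and-reloaded pipeline reproduces its outputs within the 1e-4 tolerance.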
| 84
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'vinvino02/glpn-kitti': 'https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json',
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class GLPNConfig(PretrainedConfig):
    model_type = '''glpn'''
    def __init__( self , num_channels=3 , num_encoder_blocks=4 , depths=[2, 2, 2, 2] , sr_ratios=[8, 4, 2, 1] , hidden_sizes=[32, 64, 160, 256] , patch_sizes=[7, 3, 3, 3] , strides=[4, 2, 2, 2] , num_attention_heads=[1, 2, 5, 8] , mlp_ratios=[4, 4, 4, 4] , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , drop_path_rate=0.1 , layer_norm_eps=1E-6 , decoder_hidden_size=64 , max_depth=10 , head_in_index=-1 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
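# Added example (illustrative): the defaults above describe a 4-stage encoder.
#   config = GLPNConfig()
#   config.num_encoder_blocks  # -> 4
#   config.hidden_sizes        # -> [32, 64, 160, 256]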
| 6
|
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)
def cosine_distance( image_embeds , text_embeds ):
    '''simple docstring'''
    normalized_image_embeds = nn.functional.normalize(image_embeds )
    normalized_text_embeds = nn.functional.normalize(text_embeds )
    return torch.mm(normalized_image_embeds , normalized_text_embeds.t() )
class StableDiffusionSafetyChecker( PreTrainedModel ):
    """simple docstring"""
    config_class = CLIPConfig
    _no_split_modules = ['CLIPEncoderLayer']
    def __init__( self , config ):
        '''simple docstring'''
        super().__init__(config )
        self.vision_model = CLIPVisionModel(config.vision_config )
        self.visual_projection = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=False )
        self.concept_embeds = nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=False )
        self.special_care_embeds = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=False )
        self.concept_embeds_weights = nn.Parameter(torch.ones(17 ) , requires_grad=False )
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3 ) , requires_grad=False )
    @torch.no_grad()
    def forward( self , clip_input , images ):
        '''simple docstring'''
        pooled_output = self.vision_model(clip_input )[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output )
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds , self.special_care_embeds ).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds , self.concept_embeds ).cpu().float().numpy()
        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size ):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}
            # increase this value to create a stronger `nsfw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0
            for concept_idx in range(len(special_cos_dist[0] ) ):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment , 3 )
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]} )
                    adjustment = 0.01
            for concept_idx in range(len(cos_dist[0] ) ):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment , 3 )
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx )
            result.append(result_img )
        has_nsfw_concepts = [len(res["bad_concepts"] ) > 0 for res in result]
        return images, has_nsfw_concepts
    @torch.no_grad()
    def forward_onnx( self , clip_input: torch.FloatTensor , images: torch.FloatTensor ):
        '''simple docstring'''
        pooled_output = self.vision_model(clip_input )[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output )
        special_cos_dist = cosine_distance(image_embeds , self.special_care_embeds )
        cos_dist = cosine_distance(image_embeds , self.concept_embeds )
        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0
        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0 , dim=1 )
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] )
        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0 , dim=1 )
        return images, has_nsfw_concepts
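    # Added note: `adjustment` implements the behaviour described in the comments
    # above -- once any "special care" concept fires, every concept score is
    # shifted up by 0.01, making the final nsfw decision slightly more conservative.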
| 68
| 0
|
'''simple docstring'''
def gray_code( bit_count: int ) -> list:
    if bit_count < 0:
        raise ValueError("""The given input must be positive""" )
    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count )
    #
    # convert them to integers
    for i in range(len(sequence ) ):
        sequence[i] = int(sequence[i] , 2 )
    return sequence
def gray_code_sequence_string( bit_count: int ) -> list:
    if bit_count == 0:
        return ["0"]
    if bit_count == 1:
        return ["0", "1"]
    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1<< n is equivalent to 2^n
    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1 )
    sequence = []
    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2 ):
        generated_no = """0""" + smaller_sequence[i]
        sequence.append(generated_no )
    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2 ) ):
        generated_no = """1""" + smaller_sequence[i]
        sequence.append(generated_no )
    return sequence
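# Added self-check: gray_code(2) enumerates all 2-bit values so that consecutive
# entries differ in exactly one bit.
assert gray_code(2) == [0, 1, 3, 2]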
if __name__ == "__main__":
import doctest
doctest.testmod()
| 363
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class CycleDiffusionPipelineFastTests( PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    pipeline_class = CycleDiffusionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
"negative_prompt",
"height",
"width",
"negative_prompt_embeds",
}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"} )
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components( self ):
        torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
        scheduler = DDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , num_train_timesteps=1_000 , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        components = {
            """unet""": unet,
            """scheduler""": scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """safety_checker""": None,
            """feature_extractor""": None,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        image = image / 2 + 0.5
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            """prompt""": """An astronaut riding an elephant""",
            """source_prompt""": """An astronaut riding a horse""",
            """image""": image,
            """generator""": generator,
            """num_inference_steps""": 2,
            """eta""": 0.1,
            """strength""": 0.8,
            """guidance_scale""": 3,
            """source_guidance_scale""": 1,
            """output_type""": """numpy""",
        }
        return inputs
    def test_stable_diffusion_cycle( self ):
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = CycleDiffusionPipeline(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        output = pipe(**inputs )
        images = output.images
        image_slice = images[0, -3:, -3:, -1]
        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    @unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
    def test_stable_diffusion_cycle_fp16( self ):
        components = self.get_dummy_components()
        for name, module in components.items():
            if hasattr(module , """half""" ):
                components[name] = module.half()
        pipe = CycleDiffusionPipeline(**components )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(torch_device )
        output = pipe(**inputs )
        images = output.images
        image_slice = images[0, -3:, -3:, -1]
        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    @skip_mps
    def test_save_load_local( self ):
        return super().test_save_load_local()
    @unittest.skip("""non-deterministic pipeline""" )
    def test_inference_batch_single_identical( self ):
        return super().test_inference_batch_single_identical()
    @skip_mps
    def test_dict_tuple_outputs_equivalent( self ):
        return super().test_dict_tuple_outputs_equivalent()
    @skip_mps
    def test_save_load_optional_components( self ):
        return super().test_save_load_optional_components()
    @skip_mps
    def test_attention_slicing_forward_pass( self ):
        return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class CycleDiffusionPipelineIntegrationTests(unittest.TestCase ):
    """simple docstring"""
    def tearDown( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_cycle_diffusion_pipeline_fp16( self ):
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/cycle-diffusion/black_colored_car.png""" )
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy""" )
        init_image = init_image.resize((512, 512) )
        model_id = """CompVis/stable-diffusion-v1-4"""
        scheduler = DDIMScheduler.from_pretrained(model_id , subfolder="""scheduler""" )
        pipe = CycleDiffusionPipeline.from_pretrained(
            model_id , scheduler=scheduler , safety_checker=None , torch_dtype=torch.float16 , revision="""fp16""" )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        prompt = """A black colored car"""
        source_prompt = """A blue colored car"""
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , source_prompt=source_prompt , image=init_image , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=generator , output_type="""np""" , )
        image = output.images
        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image ).max() < 5E-1
    def test_cycle_diffusion_pipeline( self ):
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/cycle-diffusion/black_colored_car.png""" )
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy""" )
        init_image = init_image.resize((512, 512) )
        model_id = """CompVis/stable-diffusion-v1-4"""
        scheduler = DDIMScheduler.from_pretrained(model_id , subfolder="""scheduler""" )
        pipe = CycleDiffusionPipeline.from_pretrained(model_id , scheduler=scheduler , safety_checker=None )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        prompt = """A black colored car"""
        source_prompt = """A blue colored car"""
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , source_prompt=source_prompt , image=init_image , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=generator , output_type="""np""" , )
        image = output.images
        assert np.abs(image - expected_image ).max() < 2E-2
| 43
| 0
|
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
PATTERNS = [
['''attention''', '''attn'''],
['''encoder_attention''', '''encoder_attn'''],
['''q_lin''', '''q_proj'''],
['''k_lin''', '''k_proj'''],
['''v_lin''', '''v_proj'''],
['''out_lin''', '''out_proj'''],
['''norm_embeddings''', '''layernorm_embedding'''],
['''position_embeddings''', '''embed_positions'''],
['''embeddings''', '''embed_tokens'''],
['''ffn.lin''', '''fc'''],
]
def rename_state_dict_key( k: str ):
    '''simple docstring'''
    if k == "embeddings.weight":
        return "shared.weight"
    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name , hf_name )
    if k.startswith('encoder' ):
        k = k.replace('.attn' , '.self_attn' )
        k = k.replace('norm1' , 'self_attn_layer_norm' )
        k = k.replace('norm2' , 'final_layer_norm' )
    elif k.startswith('decoder' ):
        k = k.replace('norm1' , 'self_attn_layer_norm' )
        k = k.replace('norm2' , 'encoder_attn_layer_norm' )
        k = k.replace('norm3' , 'final_layer_norm' )
    return k
def rename_layernorm_keys( sd: dict ):
    '''simple docstring'''
    keys = [
        'model.encoder.layernorm_embedding.weight',
        'model.encoder.layernorm_embedding.bias',
        'model.decoder.layernorm_embedding.weight',
        'model.decoder.layernorm_embedding.bias',
    ]
    for k in keys:
        v = sd.pop(k )
        new_k = k.replace('layernorm_embedding' , 'layer_norm' )
        assert new_k not in sd
        sd[new_k] = v
IGNORE_KEYS = ['''START''']
@torch.no_grad()
def convert_parlai_checkpoint( checkpoint_path: str , pytorch_dump_folder_path: str , config_json_path: str ):
    '''simple docstring'''
    model = torch.load(checkpoint_path , map_location='cpu' )
    sd = model['model']
    cfg = BlenderbotConfig.from_json_file(config_json_path )
    m = BlenderbotForConditionalGeneration(cfg )
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k )
        if new_k not in valid_keys:
            failures.append([k, new_k] )
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd )
    m.model.load_state_dict(mapping , strict=True )
    m.half()
    m.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--src_path''', type=str, help='''like blenderbot-model.bin''')
parser.add_argument('''--save_dir''', default='''hf_blenderbot''', type=str, help='''Where to save converted model.''')
parser.add_argument(
'''--hf_config_json''', default='''blenderbot-3b-config.json''', type=str, help='''Path to config to use'''
)
    args = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
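# Added example invocation (illustrative; paths are placeholders taken from the
# argparse defaults and help strings above):
#   python <this script> --src_path blenderbot-model.bin \
#       --save_dir hf_blenderbot --hf_config_json blenderbot-3b-config.json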
| 214
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_instructblip''': [
'''INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InstructBlipConfig''',
'''InstructBlipQFormerConfig''',
'''InstructBlipVisionConfig''',
],
'''processing_instructblip''': ['''InstructBlipProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_instructblip"] = [
'''INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InstructBlipQFormerModel''',
'''InstructBlipPreTrainedModel''',
'''InstructBlipForConditionalGeneration''',
'''InstructBlipVisionModel''',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 214
| 1
|
"""simple docstring"""
from __future__ import annotations
def simple_interest( principal: float , daily_interest_rate: float , days_between_payments: float )-> float:
'''simple docstring'''
if days_between_payments <= 0:
raise ValueError("days_between_payments must be > 0" )
if daily_interest_rate < 0:
raise ValueError("daily_interest_rate must be >= 0" )
if principal <= 0:
raise ValueError("principal must be > 0" )
return principal * daily_interest_rate * days_between_payments
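# Added worked example: a 500 principal at a 0.5% daily rate over 10 days
# accrues 500 * 0.005 * 10 = 25 in simple interest.
assert abs(simple_interest(500.0, 0.005, 10) - 25.0) < 1e-9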
def compound_interest( principal: float , nominal_annual_interest_rate_percentage: float , number_of_compounding_periods: float , )-> float:
'''simple docstring'''
if number_of_compounding_periods <= 0:
raise ValueError("number_of_compounding_periods must be > 0" )
if nominal_annual_interest_rate_percentage < 0:
raise ValueError("nominal_annual_interest_rate_percentage must be >= 0" )
if principal <= 0:
raise ValueError("principal must be > 0" )
return principal * (
(1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
- 1
)
def apr_interest( principal: float , nominal_annual_percentage_rate: float , number_of_years: float , )-> float:
'''simple docstring'''
if number_of_years <= 0:
raise ValueError("number_of_years must be > 0" )
if nominal_annual_percentage_rate < 0:
raise ValueError("nominal_annual_percentage_rate must be >= 0" )
if principal <= 0:
raise ValueError("principal must be > 0" )
    return compound_interest(
        principal , nominal_annual_percentage_rate / 365 , number_of_years * 365 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 226
|
"""simple docstring"""
from math import ceil, sqrt
def solution( limit: int = 1_000_000 )-> int:
    '''simple docstring'''
    answer = 0
    for outer_width in range(3 , (limit // 4) + 2 ):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
        else:
            hole_width_lower_bound = 1
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1
        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
    return answer
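# Added explanatory note: a square lamina with outer side n and hole side m
# (m >= 1, same parity as n) uses n*n - m*m tiles, so for each n the loop counts
# the m values with n*n - m*m <= limit, i.e. m >= ceil(sqrt(n*n - limit)).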
if __name__ == "__main__":
print(f'''{solution() = }''')
| 226
| 1
|
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right
EN_CODE = 2_5_6_0_4_7
RO_CODE = 2_5_6_1_4_5
@require_sentencepiece
@require_tokenizers
class NllbTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = NllbTokenizer
    rust_tokenizer_class = NllbTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    from_pretrained_kwargs = {}
    def setUp(self):
        """simple docstring"""
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = NllbTokenizer(SAMPLE_VOCAB , keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_full_tokenizer(self):
        """simple docstring"""
        tokenizer = NllbTokenizer(SAMPLE_VOCAB , keep_accents=True)
        tokens = tokenizer.tokenize("""This is a test""")
        self.assertListEqual(tokens , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
        tokens = tokenizer.tokenize("""I was born in 92000, and this is falsé.""")
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
    def test_save_pretrained(self):
        """simple docstring"""
        self.tokenizers_list = [(self.rust_tokenizer_class, """hf-internal-testing/tiny-random-nllb""", {})]
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs)
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f)
                self.assertSequenceEqual(tokenizer_r_files , tokenizer_p_files)
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key))
                shutil.rmtree(tmpdirname2)
                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 , legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files , tokenizer_p_files)
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key))
                shutil.rmtree(tmpdirname2)
                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 , legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
                # Checks it saved the tokenizer.json file
                self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files))
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key))
                shutil.rmtree(tmpdirname2)
@require_torch
def a_ ( self):
"""simple docstring"""
if not self.test_seqaseq:
return
lowerCAmelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
# Longer text that will definitely require truncation.
lowerCAmelCase = [
""" UN Chief Says There Is No Military Solution in Syria""",
""" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for"""
""" Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons"""
""" will only worsen the violence and misery for millions of people.""",
]
lowerCAmelCase = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
"""Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al"""
""" Rusiei pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi"""
""" că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
]
try:
lowerCAmelCase = tokenizer.prepare_seqaseq_batch(
src_texts=__lowerCAmelCase , tgt_texts=__lowerCAmelCase , max_length=3 , max_target_length=10 , return_tensors="""pt""" , src_lang="""eng_Latn""" , tgt_lang="""ron_Latn""" , )
except NotImplementedError:
return
self.assertEqual(batch.input_ids.shape[1] , 3)
self.assertEqual(batch.labels.shape[1] , 10)
# max_target_length will default to max_length if not specified
lowerCAmelCase = tokenizer.prepare_seqaseq_batch(
__lowerCAmelCase , tgt_texts=__lowerCAmelCase , max_length=3 , return_tensors="""pt""")
self.assertEqual(batch.input_ids.shape[1] , 3)
self.assertEqual(batch.labels.shape[1] , 3)
lowerCAmelCase = tokenizer.prepare_seqaseq_batch(
src_texts=__lowerCAmelCase , max_length=3 , max_target_length=10 , return_tensors="""pt""")
self.assertEqual(batch_encoder_only.input_ids.shape[1] , 3)
self.assertEqual(batch_encoder_only.attention_mask.shape[1] , 3)
self.assertNotIn("""decoder_input_ids""" , __lowerCAmelCase)
@unittest.skip("""Unfortunately way too slow to build a BPE with SentencePiece.""")
def a_ ( self):
"""simple docstring"""
pass
def a_ ( self):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
lowerCAmelCase = [AddedToken("""<special>""" , lstrip=__lowerCAmelCase)]
lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(
__lowerCAmelCase , additional_special_tokens=__lowerCAmelCase , **__lowerCAmelCase)
lowerCAmelCase = tokenizer_r.encode("""Hey this is a <special> token""")
lowerCAmelCase = tokenizer_r.encode("""<special>""" , add_special_tokens=__lowerCAmelCase)[0]
self.assertTrue(special_token_id in r_output)
if self.test_slow_tokenizer:
lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(
__lowerCAmelCase , additional_special_tokens=__lowerCAmelCase , **__lowerCAmelCase , )
lowerCAmelCase = self.tokenizer_class.from_pretrained(
__lowerCAmelCase , additional_special_tokens=__lowerCAmelCase , **__lowerCAmelCase)
lowerCAmelCase = tokenizer_p.encode("""Hey this is a <special> token""")
lowerCAmelCase = tokenizer_cr.encode("""Hey this is a <special> token""")
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase)
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase)
self.assertTrue(special_token_id in p_output)
self.assertTrue(special_token_id in cr_output)
@require_torch
@require_sentencepiece
@require_tokenizers
class a__( unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : int = '''facebook/nllb-200-distilled-600M'''
UpperCAmelCase_ : int = [
''' UN Chief Says There Is No Military Solution in Syria''',
''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''',
]
UpperCAmelCase_ : Optional[int] = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
'''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'''
''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'''
''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
]
UpperCAmelCase_ : List[Any] = [
2_5_6_0_4_7,
1_6_2_9_7,
1_3_4_4_0_8,
8_1_6_5,
2_4_8_0_6_6,
1_4_7_3_4,
9_5_0,
1_1_3_5,
1_0_5_7_2_1,
3_5_7_3,
8_3,
2_7_3_5_2,
1_0_8,
4_9_4_8_6,
2,
]
@classmethod
def a_ ( cls):
"""simple docstring"""
lowerCAmelCase = NllbTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="""eng_Latn""" , tgt_lang="""ron_Latn""")
lowerCAmelCase = 1
return cls
def a_ ( self):
"""simple docstring"""
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ace_Arab"""] , 256001)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ace_Latn"""] , 256002)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""fra_Latn"""] , 256057)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
self.assertIn(__lowerCAmelCase , self.tokenizer.all_special_ids)
# fmt: off
lowerCAmelCase = [RO_CODE, 4254, 98068, 112923, 39072, 3909, 713, 102767, 26, 17314, 35642, 14683, 33118, 2022, 66987, 2, 256047]
# fmt: on
lowerCAmelCase = self.tokenizer.decode(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase)
lowerCAmelCase = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__lowerCAmelCase)
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase)
self.assertNotIn(self.tokenizer.eos_token , __lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = ["""this is gunna be a long sentence """ * 20]
assert isinstance(src_text[0] , __lowerCAmelCase)
lowerCAmelCase = 10
lowerCAmelCase = self.tokenizer(__lowerCAmelCase , max_length=__lowerCAmelCase , truncation=__lowerCAmelCase).input_ids[0]
self.assertEqual(ids[-1] , 2)
self.assertEqual(ids[0] , __lowerCAmelCase)
self.assertEqual(len(__lowerCAmelCase) , __lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""]) , [256203, 3])
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = tempfile.mkdtemp()
lowerCAmelCase = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__lowerCAmelCase)
lowerCAmelCase = NllbTokenizer.from_pretrained(__lowerCAmelCase)
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __lowerCAmelCase)
@require_torch
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=len(self.expected_src_tokens) , return_tensors="""pt""" , )
lowerCAmelCase = shift_tokens_right(
batch["""labels"""] , self.tokenizer.pad_token_id , self.tokenizer.lang_code_to_id["""ron_Latn"""])
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase)
self.assertEqual((2, 15) , batch.input_ids.shape)
self.assertEqual((2, 15) , batch.attention_mask.shape)
lowerCAmelCase = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , __lowerCAmelCase)
self.assertEqual(__lowerCAmelCase , batch.decoder_input_ids[0, 0]) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE])
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id])
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.tokenizer(self.src_text , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=3 , return_tensors="""pt""")
lowerCAmelCase = self.tokenizer(
text_target=self.tgt_text , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=10 , return_tensors="""pt""")
lowerCAmelCase = targets["""input_ids"""]
lowerCAmelCase = shift_tokens_right(
__lowerCAmelCase , self.tokenizer.pad_token_id , decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] , )
self.assertEqual(batch.input_ids.shape[1] , 3)
self.assertEqual(batch.decoder_input_ids.shape[1] , 10)
@require_torch
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""eng_Latn""" , tgt_lang="""fra_Latn""")
self.assertEqual(
nested_simplify(__lowerCAmelCase) , {
# A, test, EOS, en_XX
"""input_ids""": [[256047, 70, 7356, 2]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 256057,
} , )
@require_torch
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = True
lowerCAmelCase = self.tokenizer(
"""UN Chief says there is no military solution in Syria""" , src_lang="""eng_Latn""" , tgt_lang="""fra_Latn""")
self.assertEqual(
inputs.input_ids , [16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2, 256047])
lowerCAmelCase = False
lowerCAmelCase = self.tokenizer(
"""UN Chief says there is no military solution in Syria""" , src_lang="""eng_Latn""" , tgt_lang="""fra_Latn""")
self.assertEqual(
inputs.input_ids , [256047, 16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2])
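# Illustrative usage sketch (an addition, not part of the test file): by default the
# NLLB tokenizer prepends the source language code and appends </s>; with
# legacy_behaviour=True the language code moves to the end, as the two asserts above show.
#   tok = NllbTokenizer.from_pretrained("facebook/nllb-200-distilled-600M", src_lang="eng_Latn")
#   ids = tok("A test").input_ids  # non-legacy: [EN_CODE, ..., eos_token_id]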
| 272
|
"""Greatest common divisor and modular multiplicative inverse."""


def gcd(a: int, b: int) -> int:
    # Euclid's algorithm for the greatest common divisor.
    while a != 0:
        a, b = b % a, a
    return b


def mod_inverse(a: int, m: int) -> int:
    # Modular multiplicative inverse of a modulo m, via the extended Euclidean algorithm.
    if gcd(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
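# Minimal usage sketch (added for illustration): 7 * 15 = 105 = 4 * 26 + 1, so 15 is
# the inverse of 7 modulo 26.
if __name__ == "__main__":
    assert gcd(24, 36) == 12
    assert mod_inverse(7, 26) == 15
    assert (7 * mod_inverse(7, 26)) % 26 == 1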
| 272
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/swin-tiny-patch4-window7-224''': (
'''https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'''
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class SwinOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
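# Illustrative usage sketch (added; not from the original file): with the default
# embed_dim=96 and four stages, the derived channel dimension after the last stage
# is 96 * 2**3 = 768.
#   config = SwinConfig()
#   assert config.hidden_size == 768
#   assert config.stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]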
| 368
|
"""Stable Diffusion inference on CPU with Intel Extension for PyTorch (IPEX)."""
import argparse

import intel_extension_for_pytorch as ipex
import torch

from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline


parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex; trace the UNet with a representative sample input if possible
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_states = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_states)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}
if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save("generated.png")
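# Example invocation (illustrative; the script file name is an assumption):
#   python stable_diffusion_ipex.py --dpm --steps 20
# which swaps in the DPMSolver scheduler and runs 20 inference steps under bf16 autocast.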
| 170
| 0
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            pad_token_id=1,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True

        model = TFEsmModel(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "encoder_hidden_states": encoder_hidden_states,
            "encoder_attention_mask": encoder_attention_mask,
        }
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs, encoder_hidden_states=encoder_hidden_states)

        # Also check the case where encoder outputs are not passed
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFEsmForMaskedLM(config=config)
        result = model([input_ids, input_mask])
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEsmModel,
            "fill-mask": TFEsmForMaskedLM,
            "text-classification": TFEsmForSequenceClassification,
            "token-classification": TFEsmForTokenClassification,
            "zero-shot": TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_resize_token_embeddings(self):
        pass

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_save_load_after_resize_token_embeddings(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None


@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.921518, -10.589814, -6.4671307],
                    [-6.3967156, -13.911377, -1.1211915],
                    [-7.781247, -13.951557, -3.740592],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-2))

    @slow
    def test_inference_no_head(self):
        model = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.14443092, 0.54125327, 0.3247739],
                    [0.30340484, 0.00526676, 0.31077722],
                    [0.32278043, -0.24987096, 0.3414628],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 309
|
"""simple docstring"""
def _snake_case ( lowercase__ : int = 1_0 ) -> str:
'''simple docstring'''
if not isinstance(lowercase__ , lowercase__ ) or n < 0:
raise ValueError("""Invalid input""" )
lowerCAmelCase_ :List[str] = 1_0**n
lowerCAmelCase_ :int = 2_8_4_3_3 * (pow(2 , 7_8_3_0_4_5_7 , lowercase__ )) + 1
return str(number % modulus )
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"""{solution(10) = }""")
| 84
| 0
|
import sys
from collections import defaultdict
class Heap:
    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        # Sift the element at `start` down to restore the min-heap property.
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, temp1 = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp1

                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start])
                )
                self.set_position(positions[start], temp)

                self.top_to_bottom(heap, smallest_child, size, positions)

    # Update function if value of any node in min-heap decreases
    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp


def prisms_algorithm(adjacency_list):
    # Prim's algorithm: grow a minimum spanning tree outward from vertex 0.
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges


if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
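# Minimal illustrative run (added; not in the original): for the triangle graph
# 0-1 (w=1), 1-2 (w=2), 0-2 (w=3) the minimum spanning tree keeps the two cheapest edges:
#   adjacency_list = {0: [[1, 1], [2, 3]], 1: [[0, 1], [2, 2]], 2: [[0, 3], [1, 2]]}
#   prisms_algorithm(adjacency_list)  # -> [(0, 1), (1, 2)]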
| 351
|
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "openmmlab/upernet-convnext-tiny",
    # See all UperNet models at https://huggingface.co/models?filter=upernet
]

# General docstring
_CONFIG_FOR_DOC = "UperNetConfig"


class UperNetConvModule(nn.Module):
    """
    A convolutional block that bundles conv/norm/activation layers.
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: Union[int, Tuple[int, int]],
        padding: Union[int, Tuple[int, int], str] = 0,
        bias: bool = False,
        dilation: Union[int, Tuple[int, int]] = 1,
    ) -> None:
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            padding=padding,
            bias=bias,
            dilation=dilation,
        )
        self.batch_norm = nn.BatchNorm2d(out_channels)
        self.activation = nn.ReLU()

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        output = self.conv(input)
        output = self.batch_norm(output)
        output = self.activation(output)
        return output


class UperNetPyramidPoolingBlock(nn.Module):
    def __init__(self, pool_scale: int, in_channels: int, channels: int) -> None:
        super().__init__()
        self.layers = [
            nn.AdaptiveAvgPool2d(pool_scale),
            UperNetConvModule(in_channels, channels, kernel_size=1),
        ]
        for i, layer in enumerate(self.layers):
            self.add_module(str(i), layer)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state


class UperNetPyramidPoolingModule(nn.Module):
    """
    Pyramid Pooling Module (PPM) used in PSPNet.
    """

    def __init__(self, pool_scales: Tuple[int, ...], in_channels: int, channels: int, align_corners: bool) -> None:
        super().__init__()
        self.pool_scales = pool_scales
        self.align_corners = align_corners
        self.in_channels = in_channels
        self.channels = channels
        self.blocks = []
        for i, pool_scale in enumerate(pool_scales):
            block = UperNetPyramidPoolingBlock(pool_scale=pool_scale, in_channels=in_channels, channels=channels)
            self.blocks.append(block)
            self.add_module(str(i), block)

    def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
        ppm_outs = []
        for ppm in self.blocks:
            ppm_out = ppm(x)
            upsampled_ppm_out = nn.functional.interpolate(
                ppm_out, size=x.size()[2:], mode="bilinear", align_corners=self.align_corners
            )
            ppm_outs.append(upsampled_ppm_out)
        return ppm_outs


class UperNetHead(nn.Module):
    """
    Unified Perceptual Parsing head: combines a Pyramid Pooling Module with an FPN.
    """

    def __init__(self, config, in_channels):
        super().__init__()

        self.config = config
        self.pool_scales = config.pool_scales  # e.g. (1, 2, 3, 6)
        self.in_channels = in_channels
        self.channels = config.hidden_size
        self.align_corners = False
        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

        # PSP Module
        self.psp_modules = UperNetPyramidPoolingModule(
            self.pool_scales,
            self.in_channels[-1],
            self.channels,
            align_corners=self.align_corners,
        )
        self.bottleneck = UperNetConvModule(
            self.in_channels[-1] + len(self.pool_scales) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )
        # FPN Module
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for in_channels in self.in_channels[:-1]:  # skip the top layer
            l_conv = UperNetConvModule(in_channels, self.channels, kernel_size=1)
            fpn_conv = UperNetConvModule(self.channels, self.channels, kernel_size=3, padding=1)
            self.lateral_convs.append(l_conv)
            self.fpn_convs.append(fpn_conv)

        self.fpn_bottleneck = UperNetConvModule(
            len(self.in_channels) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def psp_forward(self, inputs):
        x = inputs[-1]
        psp_outs = [x]
        psp_outs.extend(self.psp_modules(x))
        psp_outs = torch.cat(psp_outs, dim=1)
        output = self.bottleneck(psp_outs)
        return output

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # build laterals
        laterals = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)]
        laterals.append(self.psp_forward(encoder_hidden_states))

        # build top-down path
        used_backbone_levels = len(laterals)
        for i in range(used_backbone_levels - 1, 0, -1):
            prev_shape = laterals[i - 1].shape[2:]
            laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
                laterals[i], size=prev_shape, mode="bilinear", align_corners=self.align_corners
            )

        # build outputs
        fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
        # append psp feature
        fpn_outs.append(laterals[-1])

        for i in range(used_backbone_levels - 1, 0, -1):
            fpn_outs[i] = nn.functional.interpolate(
                fpn_outs[i], size=fpn_outs[0].shape[2:], mode="bilinear", align_corners=self.align_corners
            )
        fpn_outs = torch.cat(fpn_outs, dim=1)
        output = self.fpn_bottleneck(fpn_outs)
        output = self.classifier(output)

        return output


class UperNetFCNHead(nn.Module):
    """
    Fully convolutional auxiliary head used alongside the UperNet decode head.
    """

    def __init__(
        self, config, in_index: int = 2, kernel_size: int = 3, dilation: Union[int, Tuple[int, int]] = 1
    ) -> None:
        super().__init__()

        self.config = config
        self.in_channels = config.auxiliary_in_channels
        self.channels = config.auxiliary_channels
        self.num_convs = config.auxiliary_num_convs
        self.concat_input = config.auxiliary_concat_input
        self.in_index = in_index

        conv_padding = (kernel_size // 2) * dilation
        convs = []
        convs.append(
            UperNetConvModule(
                self.in_channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
            )
        )
        for i in range(self.num_convs - 1):
            convs.append(
                UperNetConvModule(
                    self.channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
                )
            )
        if self.num_convs == 0:
            self.convs = nn.Identity()
        else:
            self.convs = nn.Sequential(*convs)
        if self.concat_input:
            self.conv_cat = UperNetConvModule(
                self.in_channels + self.channels, self.channels, kernel_size=kernel_size, padding=kernel_size // 2
            )

        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # just take the relevant feature maps
        hidden_states = encoder_hidden_states[self.in_index]
        output = self.convs(hidden_states)
        if self.concat_input:
            output = self.conv_cat(torch.cat([hidden_states, output], dim=1))
        output = self.classifier(output)
        return output


class UperNetPreTrainedModel(PreTrainedModel):
    config_class = UperNetConfig
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, UperNetPreTrainedModel):
            module.backbone.init_weights()
            module.decode_head.init_weights()
            module.auxiliary_head.init_weights()

    def init_weights(self):
        self.backbone.init_weights()
        self.decode_head.init_weights()
        self.auxiliary_head.init_weights()

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, BackboneMixin):
            module.gradient_checkpointing = value


UPERNET_START_DOCSTRING = r"""
    Parameters:
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.
    config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.
        Initializing with a config file does not load the weights associated with the model, only the
        configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

UPERNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
            [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See
            `attentions` under returned tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under
            returned tensors for more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    """UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.""",
    UPERNET_START_DOCSTRING,
)
class UperNetForSemanticSegmentation(UperNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.backbone = AutoBackbone.from_config(config.backbone_config)

        # Semantic segmentation head(s)
        self.decode_head = UperNetHead(config, in_channels=self.backbone.channels)
        self.auxiliary_head = UperNetFCNHead(config) if config.use_auxiliary_head else None

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, SemanticSegmenterOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        outputs = self.backbone.forward_with_filtered_kwargs(
            pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions
        )
        features = outputs.feature_maps

        logits = self.decode_head(features)
        logits = nn.functional.interpolate(logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False)

        auxiliary_logits = None
        if self.auxiliary_head is not None:
            auxiliary_logits = self.auxiliary_head(features)
            auxiliary_logits = nn.functional.interpolate(
                auxiliary_logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False
            )

        loss = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError("The number of labels should be greater than one")
            else:
                # compute weighted loss
                loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index)
                main_loss = loss_fct(logits, labels)
                auxiliary_loss = loss_fct(auxiliary_logits, labels)
                loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss

        if not return_dict:
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SemanticSegmenterOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
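# Illustrative usage sketch (added; mirrors the public transformers API):
#   from transformers import AutoImageProcessor, UperNetForSemanticSegmentation
#   processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
#   model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
#   inputs = processor(images=image, return_tensors="pt")
#   logits = model(**inputs).logits  # shape (batch_size, num_labels, height, width)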
| 120
| 0
|
"""
A simple launcher script for TPU training: spawns multiple distributed processes
via torch_xla's xla_multiprocessing.
"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path

import torch_xla.distributed.xla_multiprocessing as xmp


def parse_args():
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch "
            "helper utility that will spawn up "
            "multiple distributed processes"
        )
    )
    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")
    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )
    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
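# Illustrative invocation (the launcher file name is an assumption added here):
#   python xla_spawn.py --num_cores 8 my_training_script.py --learning_rate 3e-5
# xmp.spawn then runs mod._mp_fn once per TPU core with the patched sys.argv.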
| 34
|
def solution(numerator: int = 1, digit: int = 1000) -> int:
    # Find the denominator <= digit whose unit fraction has the longest recurring
    # decimal cycle, by tracking when a long-division remainder repeats.
    the_digit = 1
    longest_list_length = 0

    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
                now_divide = now_divide * 10 % divide_by_number

    return the_digit


# Tests
if __name__ == "__main__":
    import doctest

    doctest.testmod()
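# Worked sketch (added for illustration): among 1/2 .. 1/10, the fraction 1/7 has the
# longest recurring decimal cycle (0.142857142857...), so solution(digit=10) returns 7.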
| 43
| 0
|
"""simple docstring"""
from pathlib import Path
import fire
def A_ ( _lowerCAmelCase : str, _lowerCAmelCase : str, _lowerCAmelCase : int ):
"""simple docstring"""
_a = Path(_lowerCAmelCase )
_a = Path(_lowerCAmelCase )
dest_dir.mkdir(exist_ok=_lowerCAmelCase )
for path in src_dir.iterdir():
_a = [x.rstrip() for x in list(path.open().readlines() )][:n]
_a = dest_dir.joinpath(path.name )
print(_lowerCAmelCase )
dest_path.open('''w''' ).write('''\n'''.join(_lowerCAmelCase ) )
if __name__ == "__main__":
fire.Fire(minify)
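# Illustrative CLI usage via fire (added; the script name is an assumption):
#   python minify_dataset.py --src_dir data/full --dest_dir data/tiny --n 100
# keeps only the first 100 lines of every file in data/full.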
| 153
|
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def A_ ( _lowerCAmelCase : Callable, _lowerCAmelCase : float, _lowerCAmelCase : float, _lowerCAmelCase : float, _lowerCAmelCase : float ):
"""simple docstring"""
_a = int(np.ceil((x_end - xa) / step_size ) )
_a = np.zeros((n + 1,) )
_a = ya
_a = xa
for k in range(_lowerCAmelCase ):
_a = y[k] + step_size * ode_func(_lowerCAmelCase, y[k] )
_a = y[k] + (
(step_size / 2) * (ode_func(_lowerCAmelCase, y[k] ) + ode_func(x + step_size, _lowerCAmelCase ))
)
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
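# Minimal usage sketch (added for illustration): integrating y' = y from x = 0 to 1
# with step 0.1 multiplies by (1 + h + h**2 / 2) per step, giving ~2.714 vs e ~ 2.71828.
#   y = euler_modified(lambda x, y: y, 1.0, 0.0, 0.1, 1.0)
#   print(y[-1])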
| 153
| 1
|
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-ctx_encoder-single-nq-base": 512,
    "facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-question_encoder-single-nq-base": 512,
    "facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-reader-single-nq-base": 512,
    "facebook/dpr-reader-multiset-base": 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class DPRContextEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION


class DPRQuestionEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])


CUSTOM_DPR_READER_DOCSTRING = r"""
    Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
    It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
    using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
    with the format:

    ```
    [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
    ```

    Args:
        questions (`str` or `List[str]`):
            The questions to be encoded. You can specify one question for many passages. In this case, the question
            will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
            `titles` or `texts`.
        titles (`str` or `List[str]`):
            The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
        texts (`str` or `List[str]`):
            The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
            Activates and controls padding. Accepts the following values:

            - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
              if provided).
            - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided.
            - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
              lengths).
        truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
            Activates and controls truncation. Accepts the following values:

            - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
              the maximum acceptable input length for the model if that argument is not provided. This will truncate
              token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
              of pairs) is provided.
            - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the first
              sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the
              second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
              greater than the model maximum admissible input size).
        max_length (`int`, *optional*):
            Controls the maximum length to use by one of the truncation/padding parameters.

            If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
            is required by one of the truncation/padding parameters. If the model has no specific maximum input
            length (like XLNet) truncation/padding to a maximum length will be deactivated.
        return_tensors (`str` or [`~utils.TensorType`], *optional*):
            If set, will return tensors instead of list of python integers. Acceptable values are:

            - `'tf'`: Return TensorFlow `tf.constant` objects.
            - `'pt'`: Return PyTorch `torch.Tensor` objects.
            - `'np'`: Return Numpy `np.ndarray` objects.
        return_attention_mask (`bool`, *optional*):
            Whether or not to return the attention mask. If not set, will return the attention mask according to the
            specific tokenizer's default, defined by the `return_outputs` attribute.

            [What are attention masks?](../glossary#attention-mask)

    Returns:
        `Dict[str, List[List[int]]]`: A dictionary with the following keys:

        - `input_ids`: List of token ids to be fed to a model.
        - `attention_mask`: List of indices specifying which tokens should be attended to by the model.
    """
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
            )
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        """
        Get the span predictions for the extractive Q&A model, sorted by descending relevance of their passages.
        """
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRSpanPrediction] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ):
        """
        Finds the best answer spans for the extractive Q&A model for one passage. It returns the best spans sorted by
        their score, where each span is a tuple of start and end indices.
        """
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]")
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f"Span is too long: {length} > {max_answer_length}")
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
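    # Toy illustration of the selection logic above (the numbers are made up for
    # the example): with start_logits = [0.1, 2.0, 0.3], end_logits = [0.2, 1.5, 0.9]
    # and max_answer_length = 2, span (1, 1) scores 2.0 + 1.5 = 3.5 and span (1, 2)
    # scores 2.0 + 0.9 = 2.9; (1, 1) is chosen first, and (1, 2) is then skipped
    # because it overlaps an already-chosen interval.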
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, DPRQuestionEncoderTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import (
    Seq2SeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = getLogger(__name__)
def eval_data_dir(
    data_dir,
    save_dir: str,
    model_name: str,
    bs: int = 8,
    max_source_length: int = 1024,
    type_path="val",
    n_obs=None,
    fp16=False,
    task="summarization",
    local_rank=None,
    num_return_sequences=1,
    dataset_kwargs: Dict = None,
    prefix="",
    **generate_kwargs,
) -> Dict:
    """Run evaluation on part of the data for one gpu and save to {save_dir}/rank_{rank}_output.json"""
    model_name = str(model_name)
    assert local_rank is not None
    torch.distributed.init_process_group(backend="nccl", rank=local_rank)
    save_dir = Path(save_dir)
    save_path = save_dir.joinpath(f"rank_{local_rank}_output.json")
    torch.cuda.set_device(local_rank)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).cuda()
    if fp16:
        model = model.half()
    # determine if we need to increase num_beams
    use_task_specific_params(model, task)  # update config with task specific params
    num_beams = generate_kwargs.pop("num_beams", model.config.num_beams)  # AttributeError risk?
    if num_return_sequences > num_beams:
        num_beams = num_return_sequences
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.
    if max_source_length is None:
        max_source_length = tokenizer.model_max_length
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    ds = Seq2SeqDataset(
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length=1024,
        type_path=type_path,
        n_obs=n_obs,
        prefix=prefix,
        **dataset_kwargs,
    )
    # I set shuffle=True for a more accurate progress bar.
    # If all the longest samples are first, the prog bar estimate is too high at the beginning.
    sampler = ds.make_sortish_sampler(bs, distributed=True, add_extra_examples=False, shuffle=True)
    data_loader = DataLoader(ds, sampler=sampler, batch_size=bs, collate_fn=ds.collate_fn)
    results = []
    for batch in tqdm(data_loader):
        summaries = model.generate(
            input_ids=batch["input_ids"].to(model.device),
            attention_mask=batch["attention_mask"].to(model.device),
            num_return_sequences=num_return_sequences,
            num_beams=num_beams,
            **generate_kwargs,
        )
        preds = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        ids = batch["ids"]
        if num_return_sequences > 1:
            preds = chunks(preds, num_return_sequences)  # batch size chunks, each of size num_return_seq
        for i, pred in enumerate(preds):
            results.append({"pred": pred, "id": ids[i].item()})
    save_json(results, save_path)
    return results, sampler.num_replicas
def run_generate():
    parser = argparse.ArgumentParser(
        epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate"
    )
    parser.add_argument("--data_dir", type=str, help="like cnn_dm/test.source")
    parser.add_argument(
        "--model_name",
        type=str,
        help="like facebook/bart-large-cnn,t5-base, etc.",
        default="sshleifer/distilbart-xsum-12-3",
    )
    parser.add_argument("--save_dir", type=str, help="where to save", default="tmp_gen")
    parser.add_argument("--max_source_length", type=int, default=None)
    parser.add_argument(
        "--type_path", type=str, default="test", help="which subset to evaluate typically train/val/test"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--local_rank", type=int, default=-1, required=False, help="should be passed by distributed.launch"
    )
    parser.add_argument(
        "--n_obs", type=int, default=None, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument(
        "--num_return_sequences", type=int, default=1, required=False, help="How many sequences to return"
    )
    parser.add_argument(
        "--sync_timeout",
        type=int,
        default=600,
        required=False,
        help="How long should master process wait for other processes to finish.",
    )
    parser.add_argument("--src_lang", type=str, default=None, required=False)
    parser.add_argument("--tgt_lang", type=str, default=None, required=False)
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the beginning of src examples"
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--debug", action="store_true")
    start_time = time.time()
    args, rest = parser.parse_known_args()
    generate_kwargs = parse_numeric_n_bool_cl_kwargs(rest)
    if generate_kwargs and args.local_rank <= 0:
        print(f"parsed the following generate kwargs: {generate_kwargs}")
    json_save_dir = Path(args.save_dir + "_tmp")
    Path(json_save_dir).mkdir(exist_ok=True)  # this handles locking.
    intermediate_files = list(json_save_dir.glob("rank_*.json"))
    if intermediate_files:
        raise ValueError(f"Found files at {json_save_dir} please move or remove them.")
    # In theory, a node could finish and save before another node hits this. If this happens, we can address later.
    dataset_kwargs = {}
    if args.src_lang is not None:
        dataset_kwargs["src_lang"] = args.src_lang
    if args.tgt_lang is not None:
        dataset_kwargs["tgt_lang"] = args.tgt_lang
    Path(args.save_dir).mkdir(exist_ok=True)
    results, num_replicas = eval_data_dir(
        args.data_dir,
        json_save_dir,
        args.model_name,
        type_path=args.type_path,
        bs=args.bs,
        fp16=args.fp16,
        task=args.task,
        local_rank=args.local_rank,
        n_obs=args.n_obs,
        max_source_length=args.max_source_length,
        num_return_sequences=args.num_return_sequences,
        prefix=args.prefix,
        dataset_kwargs=dataset_kwargs,
        **generate_kwargs,
    )
    if args.local_rank <= 0:
        save_dir = Path(args.save_dir)
        save_dir.mkdir(exist_ok=True)
        partial_results = gather_results_from_each_node(num_replicas, json_save_dir, args.sync_timeout)
        preds = combine_partial_results(partial_results)
        if args.num_return_sequences > 1:
            pseudolabel_path = save_dir.joinpath("pseudolabel_results.json")
            print(f"Saving aggregated results at {pseudolabel_path}, intermediate in {json_save_dir}/")
            save_json(preds, pseudolabel_path)
            return
        tgt_file = Path(args.data_dir).joinpath(args.type_path + ".target")
        with open(tgt_file) as f:
            labels = [x.rstrip() for x in f.readlines()][: len(preds)]
        # Calculate metrics, save metrics, and save _generations.txt
        calc_bleu = "translation" in args.task
        score_fn = calculate_bleu if calc_bleu else calculate_rouge
        metric_name = "bleu" if calc_bleu else "rouge"
        metrics = score_fn(preds, labels)
        metrics["n_obs"] = len(preds)
        runtime = time.time() - start_time
        metrics["seconds_per_sample"] = round(runtime / metrics["n_obs"], 4)
        metrics["n_gpus"] = num_replicas
        # TODO(@stas00): add whatever metadata to metrics
        metrics_save_path = save_dir.joinpath(f"{args.type_path}_{metric_name}.json")
        save_json(metrics, metrics_save_path, indent=None)
        print(metrics)
        write_txt_file(preds, save_dir.joinpath(f"{args.type_path}_generations.txt"))
        if args.debug:
            write_txt_file(labels, save_dir.joinpath(f"{args.type_path}.target"))
    else:
        shutil.rmtree(json_save_dir)
def combine_partial_results(partial_results) -> List:
    """Concatenate partial results into one file, then sort it by id."""
    records = []
    for partial_result in partial_results:
        records.extend(partial_result)
    records = sorted(records, key=lambda x: x["id"])
    preds = [x["pred"] for x in records]
    return preds
def gather_results_from_each_node(num_replicas, save_dir, timeout) -> List[Dict[str, List]]:
    # WAIT FOR lots of .json files
    start_wait = time.time()
    logger.info("waiting for all nodes to finish")
    json_data = None
    while (time.time() - start_wait) < timeout:
        json_files = list(save_dir.glob("rank_*.json"))
        if len(json_files) < num_replicas:
            continue
        try:
            # make sure all json files are fully saved
            json_data = lmap(load_json, json_files)
            return json_data
        except JSONDecodeError:
            continue
    else:
        raise TimeoutError("Rank 0 gave up on waiting for other processes")
# Unreachable
if __name__ == "__main__":
# Usage for MT:
run_generate()
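# Illustrative launch (an assumption, not from the original file; the model name is
# simply the script's default). The script is meant to be started once per GPU via
# torch.distributed, e.g.:
#
#   python -m torch.distributed.launch --nproc_per_node=2 run_distributed_eval.py \
#       --model_name sshleifer/distilbart-xsum-12-3 --data_dir xsum --save_dir gens --fp16
#
# Each rank writes rank_<N>_output.json; rank 0 then gathers, scores, and saves metrics.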
'''simple docstring'''
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a second-order low-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a second-order high-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a second-order band-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a second-order all-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a second-order peak filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a second-order low-shelf filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a second-order high-shelf filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
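

if __name__ == "__main__":
    # Demo sketch (not part of the original module): build a 1 kHz low-pass biquad
    # at a 48 kHz sample rate and run a few samples through it. This assumes
    # IIRFilter exposes a per-sample `process` method, as in the iir_filter module
    # imported above; each make_* helper only differs in how b0/b1/b2 and a0/a1/a2
    # are derived from w0, sin(w0), cos(w0) and alpha.
    demo_filter = make_lowpass(1_000, 48_000)
    demo_samples = [0.0, 1.0, 0.5, -0.25, 0.0]
    print([demo_filter.process(sample) for sample in demo_samples])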
'''simple docstring'''
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class RagFinetuneExampleTests(TestCasePlus):
    def _create_dummy_data(self, data_dir):
        os.makedirs(data_dir, exist_ok=True)
        contents = {"source": "What is love ?", "target": "life"}
        n_lines = {"train": 12, "val": 2, "test": 2}
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = "\n".join([contents[field]] * n_lines[split])
                with open(os.path.join(data_dir, f"{split}.{field}"), "w") as f:
                    f.write(content)

    def _run_finetune(self, gpus: int, distributed_retriever: str = "pytorch"):
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir, "output")
        data_dir = os.path.join(tmp_dir, "data")
        self._create_dummy_data(data_dir=data_dir)
        testargs = f"""
            --data_dir {data_dir}
            --output_dir {output_dir}
            --model_name_or_path facebook/rag-sequence-base
            --model_type rag_sequence
            --do_train
            --do_predict
            --n_val -1
            --val_check_interval 1.0
            --train_batch_size 2
            --eval_batch_size 1
            --max_source_length 25
            --max_target_length 25
            --val_max_target_length 25
            --test_max_target_length 25
            --label_smoothing 0.1
            --dropout 0.1
            --attention_dropout 0.1
            --weight_decay 0.001
            --adam_epsilon 1e-08
            --max_grad_norm 0.1
            --lr_scheduler polynomial
            --learning_rate 3e-04
            --num_train_epochs 1
            --warmup_steps 4
            --gradient_accumulation_steps 1
            --distributed-port 8787
            --use_dummy_dataset 1
            --distributed_retriever {distributed_retriever}
        """.split()
        if gpus > 0:
            testargs.append(f"--gpus={gpus}")
            if is_apex_available():
                testargs.append("--fp16")
        else:
            testargs.append("--gpus=0")
            testargs.append("--distributed_backend=ddp_cpu")
            testargs.append("--num_processes=2")
        cmd = [sys.executable, str(Path(finetune_rag.__file__).resolve())] + testargs
        execute_subprocess_async(cmd, env=self.get_env())
        metrics_save_path = os.path.join(output_dir, "metrics.json")
        with open(metrics_save_path) as f:
            result = json.load(f)
        return result

    @require_torch_gpu
    def test_finetune_gpu(self):
        result = self._run_finetune(gpus=1)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    def test_finetune_multigpu(self):
        result = self._run_finetune(gpus=2)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_gpu
    @require_ray
    def test_finetune_gpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    @require_ray
    def test_finetune_multigpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
lowerCAmelCase : Tuple = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
def tpu_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("tpu-config", description=_description)
    else:
        parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description)
    # Core arguments
    config_args = parser.add_argument_group(
        "Config Arguments", "Arguments that can be configured through `accelerate config`."
    )
    config_args.add_argument(
        "--config_file", type=str, default=None, help="Path to the config file to use for accelerate."
    )
    config_args.add_argument(
        "--tpu_name",
        default=None,
        help="The name of the TPU to use. If not specified, will use the TPU specified in the config file.",
    )
    config_args.add_argument(
        "--tpu_zone",
        default=None,
        help="The zone of the TPU to use. If not specified, will use the zone specified in the config file.",
    )
    pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options ran inside the TPU.")
    pod_args.add_argument(
        "--use_alpha",
        action="store_true",
        help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.",
    )
    pod_args.add_argument(
        "--command_file",
        default=None,
        help="The path to the file containing the commands to run on the pod on startup.",
    )
    pod_args.add_argument(
        "--command", action="append", nargs="+", help="A command to run on the pod. Can be passed multiple times."
    )
    pod_args.add_argument(
        "--install_accelerate",
        action="store_true",
        help="Whether to install accelerate on the pod. Defaults to False.",
    )
    pod_args.add_argument(
        "--accelerate_version",
        default="latest",
        help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.",
    )
    pod_args.add_argument(
        "--debug", action="store_true", help="If set, will print the command that would be run instead of running it."
    )
    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher)
    return parser
def tpu_command_launcher(args):
    defaults = None
    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
        if not args.command_file and defaults.command_file is not None and not args.command:
            args.command_file = defaults.command_file
        if not args.command and defaults.commands is not None:
            args.command = defaults.commands
        if not args.tpu_name:
            args.tpu_name = defaults.tpu_name
        if not args.tpu_zone:
            args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        args.accelerate_version = "accelerate -U"
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = f"accelerate=={args.accelerate_version}"
    if not args.command_file and not args.command:
        raise ValueError("You must specify either a command file or a command to run on the pod.")
    if args.command_file:
        with open(args.command_file, "r") as f:
            args.command = [f.read().splitlines()]
    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    new_cmd = ["cd /usr/share"]
    if args.install_accelerate:
        new_cmd += [f"pip install {args.accelerate_version}"]
    new_cmd += args.command
    args.command = "; ".join(new_cmd)
    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["gcloud"]
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(f"Running {' '.join(cmd)}")
        return
    subprocess.run(cmd)
    print("Successfully setup pod.")
def main():
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args)
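# Illustrative invocation (an assumption; flag names are taken from the parser
# above, and the TPU name/zone are placeholders):
#
#   accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central2-b \
#       --command "python train.py" --install_accelerate --debug
#
# With --debug set, the assembled `gcloud compute tpus tpu-vm ssh ...` command is
# printed instead of executed.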
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class DepthEstimationPipeline(Pipeline):
    """
    Depth estimation pipeline using any `AutoModelForDepthEstimation`.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING)

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        return {}, {}, {}

    def preprocess(self, image):
        image = load_image(image)
        self.image_size = image.size
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs):
        predicted_depth = model_outputs.predicted_depth
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode="bicubic", align_corners=False
        )
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output)).astype("uint8")
        depth = Image.fromarray(formatted)
        output_dict = {}
        output_dict["predicted_depth"] = predicted_depth
        output_dict["depth"] = depth
        return output_dict
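

# Minimal usage sketch (kept commented out; assumes a depth-estimation checkpoint
# such as "Intel/dpt-large" is reachable, and uses the output keys built by
# postprocess above):
#
# from transformers import pipeline
#
# depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
# result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
# result["depth"]            # PIL.Image rescaled to the input resolution
# result["predicted_depth"]  # raw torch.Tensor from the model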
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True

    @slow
    def test_sequence_builders(self):
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class StableDiffusionKDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        sd_pipe.set_scheduler("sample_euler")
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_2(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        sd_pipe.set_scheduler("sample_euler")
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1

    def test_stable_diffusion_karras_sigmas(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        sd_pipe.set_scheduler("sample_dpmpp_2m")
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=15,
            output_type="np",
            use_karras_sigmas=True,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowercase : str = {"""configuration_vit""": ["""VIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTConfig""", """ViTOnnxConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : List[str] = ["""ViTFeatureExtractor"""]
lowercase : str = ["""ViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit"] = [
"""VIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTForImageClassification""",
"""ViTForMaskedImageModeling""",
"""ViTModel""",
"""ViTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit"] = [
"""TFViTForImageClassification""",
"""TFViTModel""",
"""TFViTPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vit"] = [
"""FlaxViTForImageClassification""",
"""FlaxViTModel""",
"""FlaxViTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
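
# Note on the lazy-module pattern above (a descriptive sketch, not new behavior):
# nothing heavy is imported until an attribute is first accessed, so
#
#   from transformers.models.vit import ViTConfig   # cheap, config only
#   from transformers.models.vit import ViTModel    # triggers the torch import path
#
# and, in this file, unavailable backends (torch/tf/flax/vision) simply leave
# their names out of _import_structure instead of raising at import time.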
'''simple docstring'''
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
WEIGHTS_TO_AVERAGE_ENDSWITH = [
"word_embeddings_layernorm.weight",
"word_embeddings_layernorm.bias",
"input_layernorm.weight",
"input_layernorm.bias",
"post_attention_layernorm.weight",
"post_attention_layernorm.bias",
"self_attention.dense.bias",
"mlp.dense_4h_to_h.bias",
"ln_f.weight",
"ln_f.bias",
]
WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
"mlp.dense_4h_to_h.weight",
"self_attention.dense.weight",
]
def layer_name_mapping(key, file):
    """Convert Megatron-DeepSpeed TP/PP weight names to the transformers naming."""
    # Handle first and last layers
    layer_rename_map = {
        "word_embeddings.weight": "word_embeddings.weight",
        "word_embeddings.norm.weight": "word_embeddings_layernorm.weight",
        "word_embeddings.norm.bias": "word_embeddings_layernorm.bias",
        "weight": "ln_f.weight",
        "bias": "ln_f.bias",
    }
    if key in layer_rename_map:
        return layer_rename_map[key]
    # Handle transformer blocks
    layer_number = int(re.match(r".*layer_(\d*).*", file)[1])
    layer_number -= 3
    return f"h.{layer_number}." + key
def get_dtype_size(dtype):
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r"[^\d](\d+)$", str(dtype))
    if bit_search is None:
        raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
    bit_size = int(bit_search.groups()[0])
    return bit_size // 8
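
# Quick sanity check of the helper above (the values follow directly from the regex):
#   get_dtype_size(torch.float16) -> 2
#   get_dtype_size(torch.float32) -> 4
#   get_dtype_size(torch.bool)    -> 0.125  (1/8 of a byte per element)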
def convert_bloom_checkpoint_to_pytorch(
    bloom_checkpoint_path, bloom_config_file, pytorch_dump_folder_path, shard_model, pretraining_tp
):
if bloom_config_file == "":
lowerCAmelCase_ : Any = BloomConfig()
else:
lowerCAmelCase_ : Any = BloomConfig.from_json_file(A__ )
if shard_model:
        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))
        index_dict = {"weight_map": {}, "metadata": {}}
        total_size = 0
        missing_keys = None
        config = BloomConfig()
        for j, file in enumerate(file_names):
            print("Processing file: {}".format(file))
            tensors = None
            for i in range(pretraining_tp):
# load all TP files
lowerCAmelCase_ : Optional[Any] = file.replace("""model_00""" , f'model_0{i}' )
lowerCAmelCase_ : Optional[int] = torch.load(os.path.join(A__ , A__ ) , map_location="""cpu""" )
# Rename keys in the transformers names
lowerCAmelCase_ : str = list(temp.keys() )
for key in keys:
lowerCAmelCase_ : Optional[int] = temp.pop(A__ )
if tensors is None:
lowerCAmelCase_ : str = temp
else:
for key in tensors.keys():
if any(key.endswith(A__ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
# We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
lowerCAmelCase_ : int = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
# We concatenate these weights accross TP ranks
lowerCAmelCase_ : List[str] = torch.cat([tensors[key], temp[key]] , dim=A__ )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(A__ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
lowerCAmelCase_ : Optional[int] = tensors[key] / pretraining_tp
torch.save(
A__ , os.path.join(
A__ , """pytorch_model_{}-of-{}.bin""".format(str(j + 1 ).zfill(5 ) , str(len(A__ ) ).zfill(5 ) ) , ) , )
for key in tensors.keys():
lowerCAmelCase_ : str = tensors[key]
total_size += value.numel() * get_dtype_size(value.dtype )
if key not in index_dict["weight_map"]:
lowerCAmelCase_ : Tuple = """pytorch_model_{}-of-{}.bin""".format(
str(j + 1 ).zfill(5 ) , str(len(A__ ) ).zfill(5 ) )
lowerCAmelCase_ : str = BloomConfig()
lowerCAmelCase_ : List[Any] = pytorch_dump_folder_path + """/""" + CONFIG_NAME
lowerCAmelCase_ : List[Any] = total_size
with open(A__ , """w""" , encoding="""utf-8""" ) as f:
f.write(config.to_json_string() )
with open(os.path.join(A__ , WEIGHTS_NAME + """.index.json""" ) , """w""" , encoding="""utf-8""" ) as f:
lowerCAmelCase_ : Optional[int] = json.dumps(A__ , indent=2 , sort_keys=A__ ) + """\n"""
f.write(A__ )
else:
lowerCAmelCase_ : int = BloomModel(A__ )
        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))
        missing_keys = None
        for i, file in enumerate(file_names):
            tensors = None
            for i in range(pretraining_tp):
# load all TP files
lowerCAmelCase_ : str = file.replace("""model_00""" , f'model_0{i}' )
lowerCAmelCase_ : Optional[Any] = torch.load(os.path.join(A__ , A__ ) , map_location="""cpu""" )
# Rename keys in the transformers names
lowerCAmelCase_ : Union[str, Any] = list(temp.keys() )
for key in keys:
lowerCAmelCase_ : str = temp.pop(A__ )
if tensors is None:
lowerCAmelCase_ : int = temp
else:
for key in tensors.keys():
# We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
if any(key.endswith(A__ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
lowerCAmelCase_ : Optional[Any] = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
# We concatenate these weights accross TP ranks
lowerCAmelCase_ : Optional[Any] = torch.cat([tensors[key], temp[key]] , dim=A__ )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(A__ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
lowerCAmelCase_ : Union[str, Any] = tensors[key] / pretraining_tp
lowerCAmelCase_ : Optional[int] = model.load_state_dict(A__ , strict=A__ )
assert not other_keys.unexpected_keys, f'The keys {other_keys.unexpected_keys} are unexpected'
if missing_keys is None:
lowerCAmelCase_ : Any = set(other_keys.missing_keys )
else:
lowerCAmelCase_ : List[Any] = missing_keys.intersection(set(other_keys.missing_keys ) )
assert not missing_keys, f'The keys {missing_keys} are missing'
# Save pytorch-model
os.makedirs(A__ , exist_ok=A__ )
lowerCAmelCase_ : Dict = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME
lowerCAmelCase_ : Tuple = pytorch_dump_folder_path + """/""" + CONFIG_NAME
print(f'Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}' )
if config.torch_dtype is not None:
lowerCAmelCase_ : Any = model.to(config.torch_dtype )
torch.save(model.state_dict() , A__ )
print(f'Save configuration file to {pytorch_config_dump_path}' )
with open(A__ , """w""" , encoding="""utf-8""" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--bloom_checkpoint_path",
default=None,
type=str,
required=True,
help="Path to the Megatron-LM checkpoint path.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--bloom_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--shard_model",
action="store_true",
help="An optional setting to shard the output model \nThis enables sharding the converted checkpoint",
)
parser.add_argument(
"--pretraining_tp",
default=4,
type=int,
help="Pretraining TP rank that has been used when training the model in Megatron-LM \n",
)
    args = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
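    # Illustrative invocation (an assumption: the script filename and all paths
    # below are placeholders, only the flag names come from the parser above):
    #
    #   python convert_bloom_original_checkpoint_to_pytorch.py \
    #       --bloom_checkpoint_path /path/to/megatron_checkpoints \
    #       --pytorch_dump_folder_path /path/to/output \
    #       --shard_model --pretraining_tp 4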
"""simple docstring"""
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2Processor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('''1.6'''):
    _is_native_amp_available = True
from torch.cuda.amp import autocast
logger = logging.getLogger(__name__)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    attention_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for the attention probabilities."}
    )
    activation_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for activations inside the fully connected layer."}
    )
    hidden_dropout: Optional[float] = field(
        default=0.1,
        metadata={
            "help": "The dropout probability for all fully connected layers in the embeddings, encoder, and pooler."
        },
    )
    feat_proj_dropout: Optional[float] = field(
        default=0.1,
        metadata={"help": "The dropout probability for all 1D convolutional layers in feature extractor."},
    )
    mask_time_prob: Optional[float] = field(
        default=0.05,
        metadata={
            "help": (
                "Probability of each feature vector along the time axis to be chosen as the start of the vector "
                "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature "
                "vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."
            )
        },
    )
    layerdrop: Optional[float] = field(default=0.0, metadata={"help": "The LayerDrop probability."})
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train+validation",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_val_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of validation examples to this "
                "value if set."
            )
        },
    )
    chars_to_ignore: List[str] = list_field(
        default=[",", "?", ".", "!", "-", ";", ":", '""', "%", "'", '"', "�"],
        metadata={"help": "A list of characters to remove from the transcripts."},
    )
@dataclass
class DataCollatorCTCWithPadding:
    """
    Data collator that will dynamically pad the inputs received.
    """

    processor: Wav2Vec2Processor
    padding: Union[bool, str] = True
    max_length: Optional[int] = None
    max_length_labels: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    pad_to_multiple_of_labels: Optional[int] = None
    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # split inputs and labels since they have to be of different lenghts and need
        # different padding methods
        input_features = [{"input_values": feature["input_values"]} for feature in features]
        label_features = [{"input_ids": feature["labels"]} for feature in features]
        batch = self.processor.pad(
            input_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        labels_batch = self.processor.pad(
            labels=label_features,
            padding=self.padding,
            max_length=self.max_length_labels,
            pad_to_multiple_of=self.pad_to_multiple_of_labels,
            return_tensors="pt",
        )
        # replace padding with -100 to ignore loss correctly
        labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)
        batch["labels"] = labels
        return batch
class CTCTrainer(Trainer):
    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        model.train()
        inputs = self._prepare_inputs(inputs)
        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)
        if self.args.n_gpu > 1:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["labels"] >= 0).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")
        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps
        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()
        return loss.detach()
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('''Training/evaluation parameters %s''' , lowercase_ )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets:
_lowerCamelCase = datasets.load_dataset(
'''common_voice''' , data_args.dataset_config_name , split=data_args.train_split_name )
_lowerCamelCase = datasets.load_dataset('''common_voice''' , data_args.dataset_config_name , split='''test''' )
# Create and save tokenizer
_lowerCamelCase = F"""[{"".join(data_args.chars_to_ignore )}]"""
def remove_special_characters(lowercase_ : Tuple ):
_lowerCamelCase = re.sub(lowercase_ , '''''' , batch['''sentence'''] ).lower() + """ """
return batch
_lowerCamelCase = train_dataset.map(lowercase_ , remove_columns=['''sentence'''] )
_lowerCamelCase = eval_dataset.map(lowercase_ , remove_columns=['''sentence'''] )
def extract_all_chars(lowercase_ : List[str] ):
_lowerCamelCase = """ """.join(batch['''text'''] )
_lowerCamelCase = list(set(lowercase_ ) )
return {"vocab": [vocab], "all_text": [all_text]}
_lowerCamelCase = train_dataset.map(
lowercase_ , batched=lowercase_ , batch_size=-1 , keep_in_memory=lowercase_ , remove_columns=train_dataset.column_names , )
_lowerCamelCase = train_dataset.map(
lowercase_ , batched=lowercase_ , batch_size=-1 , keep_in_memory=lowercase_ , remove_columns=eval_dataset.column_names , )
_lowerCamelCase = list(set(vocab_train['''vocab'''][0] ) | set(vocab_test['''vocab'''][0] ) )
_lowerCamelCase = {v: k for k, v in enumerate(lowercase_ )}
_lowerCamelCase = vocab_dict[""" """]
del vocab_dict[" "]
_lowerCamelCase = len(lowercase_ )
_lowerCamelCase = len(lowercase_ )
with open('''vocab.json''' , '''w''' ) as vocab_file:
json.dump(lowercase_ , lowercase_ )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    tokenizer = Wav2Vec2CTCTokenizer(
        "vocab.json",
        unk_token="[UNK]",
        pad_token="[PAD]",
        word_delimiter_token="|",
    )
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1, sampling_rate=16_000, padding_value=0.0, do_normalize=True, return_attention_mask=True
    )
    processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
    model = Wav2Vec2ForCTC.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        activation_dropout=model_args.activation_dropout,
        attention_dropout=model_args.attention_dropout,
        hidden_dropout=model_args.hidden_dropout,
        feat_proj_dropout=model_args.feat_proj_dropout,
        mask_time_prob=model_args.mask_time_prob,
        gradient_checkpointing=training_args.gradient_checkpointing,
        layerdrop=model_args.layerdrop,
        ctc_loss_reduction="mean",
        pad_token_id=processor.tokenizer.pad_token_id,
        vocab_size=len(processor.tokenizer),
    )
if data_args.max_train_samples is not None:
_lowerCamelCase = min(len(lowercase_ ) , data_args.max_train_samples )
_lowerCamelCase = train_dataset.select(range(lowercase_ ) )
if data_args.max_val_samples is not None:
_lowerCamelCase = eval_dataset.select(range(data_args.max_val_samples ) )
_lowerCamelCase = torchaudio.transforms.Resample(4_80_00 , 1_60_00 )
# Preprocessing the datasets.
# We need to read the aduio files as arrays and tokenize the targets.
def speech_file_to_array_fn(lowercase_ : Dict ):
_lowerCamelCase = torchaudio.load(batch['''path'''] )
_lowerCamelCase = resampler(lowercase_ ).squeeze().numpy()
_lowerCamelCase = 1_60_00
_lowerCamelCase = batch["""text"""]
return batch
_lowerCamelCase = train_dataset.map(
lowercase_ , remove_columns=train_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
_lowerCamelCase = eval_dataset.map(
lowercase_ , remove_columns=eval_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
def prepare_dataset(lowercase_ : Union[str, Any] ):
# check that all files have the correct sampling rate
assert (
len(set(batch['''sampling_rate'''] ) ) == 1
), F"""Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."""
        batch.update(processor(audio=batch["speech"] , text=batch["target_text"] , sampling_rate=batch["sampling_rate"][0] ) )
        return batch

    train_dataset = train_dataset.map(
        prepare_dataset , remove_columns=train_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=True , num_proc=data_args.preprocessing_num_workers , )
    eval_dataset = eval_dataset.map(
        prepare_dataset , remove_columns=eval_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=True , num_proc=data_args.preprocessing_num_workers , )
# Metric
    wer_metric = datasets.load_metric("wer" )

    def compute_metrics(pred ):
        pred_logits = pred.predictions
        pred_ids = np.argmax(pred_logits , axis=-1 )
        pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id
        pred_str = processor.batch_decode(pred_ids )
        # we do not want to group tokens when computing the metrics
        label_str = processor.batch_decode(pred.label_ids , group_tokens=False )
        wer = wer_metric.compute(predictions=pred_str , references=label_str )
        return {"wer": wer}
if model_args.freeze_feature_extractor:
model.freeze_feature_extractor()
# Data collator
    data_collator = DataCollatorCTCWithPadding(processor=processor , padding=True )
    # Initialize our Trainer
    trainer = CTCTrainer(
        model=model , data_collator=data_collator , args=training_args , compute_metrics=compute_metrics , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=processor.feature_extractor , )
# Training
if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif os.path.isdir(model_args.model_name_or_path ):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
# Save the feature_extractor and the tokenizer
if is_main_process(training_args.local_rank ):
processor.save_pretrained(training_args.output_dir )
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        trainer.save_model()
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset )
        )
        metrics["train_samples"] = min(max_train_samples , len(train_dataset ) )
        trainer.log_metrics("train" , metrics )
        trainer.save_metrics("train" , metrics )
trainer.save_state()
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***" )
        metrics = trainer.evaluate()
        max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset )
        metrics["eval_samples"] = min(max_val_samples , len(eval_dataset ) )
        trainer.log_metrics("eval" , metrics )
        trainer.save_metrics("eval" , metrics )
return results
if __name__ == "__main__":
main()
| 368
|
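The script above derives its CTC vocabulary by unioning the character sets of the train and eval splits. A minimal standalone sketch of the same idea, assuming plain Python strings rather than a datasets.Dataset (the `texts` corpus here is hypothetical):

import json

def build_ctc_vocab(texts):
    # One integer id per distinct character across the corpus.
    vocab_dict = {ch: idx for idx, ch in enumerate(sorted(set("".join(texts))))}
    # CTC convention used above: "|" replaces the space character,
    # and [UNK]/[PAD] are appended at the end of the mapping.
    if " " in vocab_dict:
        vocab_dict["|"] = vocab_dict.pop(" ")
    vocab_dict["[UNK]"] = len(vocab_dict)
    vocab_dict["[PAD]"] = len(vocab_dict)
    return vocab_dict

texts = ["hello world", "goodbye"]  # hypothetical corpus
with open("vocab.json", "w") as f:
    json.dump(build_ctc_vocab(texts), f)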
"""simple docstring"""
import copy
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch:
    '''simple docstring'''

    def __init__( self ):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch( self , input_file ):
        self.img = cv2.imread(input_file , 0 )
        self.original_image = copy.deepcopy(self.img )
        x, _, _ = plt.hist(self.img.ravel() , 256 , [0, 256] , label="x" )
        self.k = np.sum(x )
        for i in range(len(x ) ):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last )
            last = int(last + 1 if self.rem >= 0.5 else last )
            self.last_list.append(last )
        self.number_of_rows = int(np.ma.count(self.img ) / self.img[1].size )
        self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols ):
            for j in range(self.number_of_rows ):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg" , self.img )

    def plot_histogram( self ):
        plt.hist(self.img.ravel() , 256 , [0, 256] )

    def show_image( self ):
        cv2.imshow("Output-Image" , self.img )
        cv2.imshow("Input-Image" , self.original_image )
        cv2.waitKey(5000 )
        cv2.destroyAllWindows()


if __name__ == "__main__":
    file_path = os.path.join(os.path.dirname(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
| 73
| 0
|
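The stretch loop above is a hand-rolled histogram-equalization mapping: sk accumulates the probability mass of each gray level and level k is remapped to (L - 1) * sk. A vectorized sketch of the same transform with NumPy (function and array names are illustrative):

import numpy as np

def equalize(img, levels=256):
    # Histogram and cumulative distribution of gray levels.
    hist, _ = np.histogram(img.ravel(), bins=levels, range=(0, levels))
    cdf = np.cumsum(hist) / hist.sum()
    # Map level k to round((L - 1) * cdf[k]); index the lookup table by pixel value.
    lut = np.round((levels - 1) * cdf).astype(img.dtype)
    return lut[img]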
"""simple docstring"""
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
lowerCAmelCase__ = logging.get_logger(__name__)
class ZeroShotClassificationArgumentHandler(ArgumentHandler ):
    def _parse_labels(self , labels ):
        if isinstance(labels , str ):
            labels = [label.strip() for label in labels.split("," ) if label.strip()]
        return labels
    def __call__(self , sequences , labels , hypothesis_template ):
        if len(labels ) == 0 or len(sequences ) == 0:
            raise ValueError("You must include at least one label and at least one sequence." )
        if hypothesis_template.format(labels[0] ) == hypothesis_template:
            raise ValueError(
                (
                    "The provided hypothesis_template \"{}\" was not able to be formatted with the target labels. "
                    "Make sure the passed template includes formatting syntax such as {{}} where the label should go."
                ).format(hypothesis_template ) )
        if isinstance(sequences , str ):
            sequences = [sequences]
        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label )] for label in labels] )
        return sequence_pairs, sequences
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ZeroShotClassificationPipeline(ChunkPipeline ):
    def __init__(self , args_parser=ZeroShotClassificationArgumentHandler() , *args , **kwargs ):
        self._args_parser = args_parser
        super().__init__(*args , **kwargs )
if self.entailment_id == -1:
logger.warning(
"Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
"-1. Define a descriptive label2id mapping in the model config to ensure correct outputs." )
@property
    def entailment_id(self ):
        for label, ind in self.model.config.label2id.items():
if label.lower().startswith("entail" ):
return ind
return -1
    def _parse_and_tokenize(self , sequence_pairs , padding=True , add_special_tokens=True , truncation=TruncationStrategy.ONLY_FIRST , **kwargs ):
        return_tensors = self.framework
        if self.tokenizer.pad_token is None:
            # Override for tokenizers not supporting padding
            logger.error(
                "Tokenizer was not supporting padding necessary for zero-shot, attempting to use "
                " `pad_token=eos_token`" )
            self.tokenizer.pad_token = self.tokenizer.eos_token
        try:
            inputs = self.tokenizer(
                sequence_pairs , add_special_tokens=add_special_tokens , return_tensors=return_tensors , padding=padding , truncation=truncation , )
        except Exception as e:
            if "too short" in str(e ):
                # tokenizers might yell that we want to truncate
                # to a value that is not even reached by the input.
                # In that case we don't want to truncate.
                # It seems there's not a really better way to catch that
                # exception.
                inputs = self.tokenizer(
                    sequence_pairs , add_special_tokens=add_special_tokens , return_tensors=return_tensors , padding=padding , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
            else:
                raise e
        return inputs
    def _sanitize_parameters(self , **kwargs ):
        if kwargs.get("multi_class" , None ) is not None:
            kwargs["multi_label"] = kwargs["multi_class"]
            logger.warning(
                "The `multi_class` argument has been deprecated and renamed to `multi_label`. "
                "`multi_class` will be removed in a future version of Transformers." )
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = self._args_parser._parse_labels(kwargs["candidate_labels"] )
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        postprocess_params = {}
        if "multi_label" in kwargs:
            postprocess_params["multi_label"] = kwargs["multi_label"]
        return preprocess_params, {}, postprocess_params
    def __call__(self , sequences , *args , **kwargs ):
        if len(args ) == 0:
            pass
        elif len(args ) == 1 and "candidate_labels" not in kwargs:
            kwargs["candidate_labels"] = args[0]
        else:
            raise ValueError(f"Unable to understand extra arguments {args}" )
        return super().__call__(sequences , **kwargs )
    def preprocess(self , inputs , candidate_labels=None , hypothesis_template="This example is {}." ):
        sequence_pairs, sequences = self._args_parser(inputs , candidate_labels , hypothesis_template )
        for i, (candidate_label, sequence_pair) in enumerate(zip(candidate_labels , sequence_pairs ) ):
            model_input = self._parse_and_tokenize([sequence_pair] )
            yield {
                "candidate_label": candidate_label,
                "sequence": sequences[0],
                "is_last": i == len(candidate_labels ) - 1,
                **model_input,
            }
    def _forward(self , inputs ):
        candidate_label = inputs["candidate_label"]
        sequence = inputs["sequence"]
        model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names}
        outputs = self.model(**model_inputs )
        model_outputs = {
            "candidate_label": candidate_label,
            "sequence": sequence,
            "is_last": inputs["is_last"],
            **outputs,
        }
        return model_outputs
    def postprocess(self , model_outputs , multi_label=False ):
        candidate_labels = [outputs["candidate_label"] for outputs in model_outputs]
        sequences = [outputs["sequence"] for outputs in model_outputs]
        logits = np.concatenate([output["logits"].numpy() for output in model_outputs] )
        N = logits.shape[0]
        n = len(candidate_labels )
        num_sequences = N // n
        reshaped_outputs = logits.reshape((num_sequences, n, -1) )
        if multi_label or len(candidate_labels ) == 1:
            # softmax over the entailment vs. contradiction dim for each label independently
            entailment_id = self.entailment_id
            contradiction_id = -1 if entailment_id == 0 else 0
            entail_contr_logits = reshaped_outputs[..., [contradiction_id, entailment_id]]
            scores = np.exp(entail_contr_logits ) / np.exp(entail_contr_logits ).sum(-1 , keepdims=True )
            scores = scores[..., 1]
        else:
            # softmax the "entailment" logits over all candidate labels
            entail_logits = reshaped_outputs[..., self.entailment_id]
            scores = np.exp(entail_logits ) / np.exp(entail_logits ).sum(-1 , keepdims=True )
        top_inds = list(reversed(scores[0].argsort() ) )
        return {
            "sequence": sequences[0],
            "labels": [candidate_labels[i] for i in top_inds],
            "scores": scores[0, top_inds].tolist(),
        }
| 153
|
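For reference, the pipeline above is normally reached through the transformers factory rather than instantiated directly; a typical call looks like this (the model choice is illustrative):

from transformers import pipeline

classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
result = classifier(
    "The new GPU doubles training throughput.",
    candidate_labels=["technology", "sports", "politics"],
    hypothesis_template="This example is {}.",
    multi_label=False,
)
print(result["labels"][0], result["scores"][0])  # highest-scoring label first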
"""simple docstring"""
from sklearn.metrics import recall_score
import datasets
lowerCAmelCase__ = '''
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
'''
lowerCAmelCase__ = '''
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the \'positive class\' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.
- `\'binary\'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
- `\'micro\'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
- `\'macro\'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `\'weighted\'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
- `\'samples\'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.
- **zero_division** (\'warn\', `0` or `1`): Sets the value to return when there is a zero division. Defaults to \'warn\'.
- `\'warn\'`: If there is a zero division, the return value is `0`, but warnings are also raised.
- `0`: If there is a zero division, the return value is `0`.
- `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
Example 1-A simple example with some errors
>>> recall_metric = datasets.load_metric(\'recall\')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
>>> print(results)
{\'recall\': 0.6666666666666666}
Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
>>> recall_metric = datasets.load_metric(\'recall\')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
>>> print(results)
{\'recall\': 0.5}
Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric(\'recall\')
>>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
>>> print(results)
{\'recall\': 0.55}
Example 4-A multiclass example, using different averages.
>>> recall_metric = datasets.load_metric(\'recall\')
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = recall_metric.compute(predictions=predictions, references=references, average=\'macro\')
>>> print(results)
{\'recall\': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=\'micro\')
>>> print(results)
{\'recall\': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=\'weighted\')
>>> print(results)
{\'recall\': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{\'recall\': array([1., 0., 0.])}
'''
lowerCAmelCase__ = '''
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCamelCase ( datasets.Metric ):
    def _info(self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("int32" ) ),
"references": datasets.Sequence(datasets.Value("int32" ) ),
}
if self.config_name == "multilabel"
else {
"predictions": datasets.Value("int32" ),
"references": datasets.Value("int32" ),
} ) , reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"] , )
    def _compute(self , predictions , references , labels=None , pos_label=1 , average="binary" , sample_weight=None , zero_division="warn" , ):
        score = recall_score(
            references , predictions , labels=labels , pos_label=pos_label , average=average , sample_weight=sample_weight , zero_division=zero_division , )
        return {"recall": float(score ) if score.size == 1 else score}
| 153
| 1
|
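As a quick check of the `zero_division` behavior documented above: when the references contain no positive examples, TP + FN is zero, recall is undefined, and the configured fallback is returned. A minimal sketch using sklearn directly:

from sklearn.metrics import recall_score

# No positive examples in the references, so recall is undefined.
print(recall_score([0, 0, 0], [0, 1, 0], zero_division=0))  # 0.0
print(recall_score([0, 0, 0], [0, 1, 0], zero_division=1))  # 1.0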
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'configuration_deit': ['DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DeiTConfig', 'DeiTOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_deit'] = ['DeiTFeatureExtractor']
    _import_structure['image_processing_deit'] = ['DeiTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_deit'] = [
'DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DeiTForImageClassification',
'DeiTForImageClassificationWithTeacher',
'DeiTForMaskedImageModeling',
'DeiTModel',
'DeiTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_deit'] = [
'TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDeiTForImageClassification',
'TFDeiTForImageClassificationWithTeacher',
'TFDeiTForMaskedImageModeling',
'TFDeiTModel',
'TFDeiTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 363
|
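The module above defers its heavy torch/TF imports through `_LazyModule`. A stripped-down sketch of the same pattern using module-level `__getattr__` (PEP 562), meant to live in a package's __init__.py; the submodule and class names are illustrative:

import importlib

_import_structure = {"modeling": ["MyModel"]}

def __getattr__(name):
    # Resolve the attribute to its submodule only on first access.
    for module_name, attrs in _import_structure.items():
        if name in attrs:
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")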
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
    DDIMScheduler,
    KandinskyV22ControlnetPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22ControlnetPipelineFastTests(PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = KandinskyV22ControlnetPipeline
    params = ["image_embeds", "negative_image_embeds", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention_forwardGenerator_pass = False

    @property
    def text_embedder_hidden_size(self ):
        return 32

    @property
    def time_input_dim(self ):
        return 32

    @property
    def block_out_channels_a(self ):
        return self.time_input_dim

    @property
    def time_embed_dim(self ):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self ):
        return 100

    @property
    def dummy_unet(self ):
        torch.manual_seed(0 )
        model_kwargs = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs )
        return model
    @property
    def dummy_movq_kwargs(self ):
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self ):
        torch.manual_seed(0 )
        model = VQModel(**self.dummy_movq_kwargs )
        return model
    def get_dummy_components(self ):
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000 , beta_schedule="linear" , beta_start=0.00085 , beta_end=0.012 , clip_sample=False , set_alpha_to_one=False , steps_offset=1 , prediction_type="epsilon" , thresholding=False , )
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self , device , seed=0 ):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed ) ).to(device )
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
            device )
        # create hint
        hint = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_controlnet(self ):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device ) , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22ControlnetPipelineIntegrationTests(unittest.TestCase ):
    def tearDown(self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_controlnet(self ):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy" )
        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png" )
        hint = torch.from_numpy(np.array(hint ) ).float() / 255.0
        hint = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.float16 )
        pipe_prior.to(torch_device )
        pipeline = KandinskyV22ControlnetPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth" , torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        prompt = "A robot, 4k photo"
        generator = torch.Generator(device="cuda" ).manual_seed(0 )
        image_emb, zero_image_emb = pipe_prior(
            prompt , generator=generator , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
        generator = torch.Generator(device="cuda" ).manual_seed(0 )
        output = pipeline(
            image_embeds=image_emb , negative_image_embeds=zero_image_emb , hint=hint , generator=generator , num_inference_steps=100 , output_type="np" , )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(image , expected_image )
| 78
| 0
|
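The fast test above pins every source of randomness: `torch.manual_seed(0)` before building weights and a per-device `torch.Generator` for sampling. A small helper in the same spirit, purely illustrative:

import torch

def make_generator(device, seed=0):
    # MPS does not accept a device-bound Generator, so fall back to the global seed.
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)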
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
'microsoft/deberta-v2-xlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
),
'microsoft/deberta-v2-xxlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
),
}
class DebertaV2Config(PretrainedConfig ):
    """simple docstring"""

    model_type = "deberta-v2"

    def __init__( self , vocab_size=128100 , hidden_size=1536 , num_hidden_layers=24 , num_attention_heads=24 , intermediate_size=6144 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=0 , initializer_range=0.02 , layer_norm_eps=1e-7 , relative_attention=False , max_relative_positions=-1 , pad_token_id=0 , position_biased_input=True , pos_att_type=None , pooler_dropout=0 , pooler_hidden_act="gelu" , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input
        # Backwards compatibility
        if type(pos_att_type ) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|" )]
        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.pooler_hidden_size = kwargs.get("pooler_hidden_size" , hidden_size )
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act
class DebertaV2OnnxConfig(OnnxConfig ):
    """simple docstring"""

    @property
    def inputs(self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)] )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)] )
    @property
    def default_onnx_opset(self ) -> int:
        return 12

    def generate_dummy_inputs(self , preprocessor , batch_size = -1 , seq_length = -1 , num_choices = -1 , is_pair = False , framework = None , num_channels = 3 , image_width = 40 , image_height = 40 , tokenizer = None , ) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor , framework=framework )
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
| 68
|
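A short usage sketch for the config above; the defaults mirror the xlarge checkpoint, and the derived `pooler_hidden_size` falls back to `hidden_size` when not passed explicitly (the smaller sizes here are illustrative):

from transformers import DebertaV2Config

config = DebertaV2Config(hidden_size=768, num_hidden_layers=12, num_attention_heads=12)
assert config.model_type == "deberta-v2"
assert config.pooler_hidden_size == 768  # falls back to hidden_size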
"""simple docstring"""
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3


class FailedTestError(RuntimeError ):
    pass


def gen(shards ):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD ):
            yield {"i": i, "shard": shard}
def main():
    rank = int(os.environ['RANK'] )
    world_size = int(os.environ['WORLD_SIZE'] )
    parser = ArgumentParser()
    parser.add_argument('--streaming' , type=bool )
    parser.add_argument('--local_rank' , type=int )
    parser.add_argument('--num_workers' , type=int , default=0 )
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers
    gen_kwargs = {'shards': [f'shard_{shard_idx}' for shard_idx in range(NUM_SHARDS )]}
    ds = IterableDataset.from_generator(gen , gen_kwargs=gen_kwargs )
    if not streaming:
        ds = Dataset.from_list(list(ds ) )
    ds = split_dataset_by_node(ds , rank=rank , world_size=world_size )
    dataloader = torch.utils.data.DataLoader(ds , num_workers=num_workers )
    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size) )
    local_size = sum(1 for _ in dataloader )
    if local_size != expected_local_size:
        raise FailedTestError(f'local_size {local_size} != expected_local_size {expected_local_size}' )
if __name__ == "__main__":
main()
| 220
| 0
|
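The expected-size arithmetic in the script above encodes how examples are balanced across ranks: each of the `world_size` ranks gets `full_size // world_size` items, and the first `full_size % world_size` ranks get one extra. A tiny check of that invariant in pure Python, no distributed setup required:

def expected_sizes(full_size, world_size):
    base, rem = divmod(full_size, world_size)
    return [base + (rank < rem) for rank in range(world_size)]

# 12 examples over 5 ranks -> [3, 3, 2, 2, 2]; the sizes always sum back to full_size.
assert expected_sizes(12, 5) == [3, 3, 2, 2, 2]
assert sum(expected_sizes(12, 5)) == 12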
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator , batch_size=16 , model_name_or_path="bert-base-cased" ):
    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path )
    datasets = load_dataset("glue" , "mrpc" )

    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=True , max_length=None )
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=False )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label" , "labels" )

    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples , padding="max_length" , max_length=128 , return_tensors="pt" )
        return tokenizer.pad(examples , padding="longest" , return_tensors="pt" )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE )
    return train_dataloader, eval_dataloader
def evaluation_loop(accelerator , model , eval_dataloader , metric ):
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader ):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device )
        with torch.no_grad():
            outputs = model(**batch )
        predictions = outputs.logits.argmax(dim=-1 )
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["labels"]) )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader ) - 1:
                predictions = predictions[: len(eval_dataloader.dataset ) - samples_seen]
                references = references[: len(eval_dataloader.dataset ) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(
            predictions=predictions , references=references , )
    eval_metric = metric.compute()
    return eval_metric["accuracy"]
def training_function(config , args ):
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"] )
    seed = int(config["seed"] )
    batch_size = int(config["batch_size"] )
    model_name_or_path = args.model_name_or_path
    set_seed(seed )
    train_dataloader, eval_dataloader = get_dataloaders(accelerator , batch_size , model_name_or_path )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path , return_dict=True )
    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters() , lr=lr )
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader ) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer , num_warmup_steps=0 , num_training_steps=max_training_steps , )
    else:
        lr_scheduler = DummyScheduler(optimizer , total_num_steps=max_training_steps , warmup_num_steps=0 )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load("glue" , "mrpc" )
    ending_epoch = num_epochs
    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch
    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint )
        epoch_string = args.resume_from_checkpoint.split("epoch_" )[1]
        state_epoch_num = ""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num ) + 1
        accuracy = evaluation_loop(accelerator , model , eval_dataloader , metric )
        accelerator.print("resumed checkpoint performance:" , accuracy )
        accelerator.print("resumed checkpoint's scheduler's lr:" , lr_scheduler.get_lr()[0] )
        accelerator.print("resumed optimizers's lr:" , optimizer.param_groups[0]["lr"] )
        with open(os.path.join(args.output_dir , f"""state_{starting_epoch-1}.json""" ) , "r" ) as f:
            resumed_state = json.load(f )
        assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
        assert (
            resumed_state["lr"] == lr_scheduler.get_lr()[0]
        ), "Scheduler learning rate mismatch, loading from checkpoint failed"
        assert (
            resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
        ), "Optimizer learning rate mismatch, loading from checkpoint failed"
        assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
        return
    # Now we train the model
    state = {}
    for epoch in range(starting_epoch , ending_epoch ):
        model.train()
        for step, batch in enumerate(train_dataloader ):
            outputs = model(**batch )
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss )
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1
        output_dir = f"""epoch_{epoch}"""
        output_dir = os.path.join(args.output_dir , output_dir )
        accelerator.save_state(output_dir )
        accuracy = evaluation_loop(accelerator , model , eval_dataloader , metric )
        state["accuracy"] = accuracy
        state["lr"] = lr_scheduler.get_lr()[0]
        state["optimizer_lr"] = optimizer.param_groups[0]["lr"]
        state["epoch"] = epoch
        state["overall_step"] = overall_step
        accelerator.print(f"""epoch {epoch}:""" , state )
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir , f"""state_{epoch}.json""" ) , "w" ) as f:
                json.dump(state , f )
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage." )
    parser.add_argument(
        "--model_name_or_path" , type=str , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=False , )
    parser.add_argument(
        "--output_dir" , type=str , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
    parser.add_argument(
        "--resume_from_checkpoint" , type=str , default=None , help="If the training should continue from a checkpoint folder." , )
    parser.add_argument(
        "--partial_train_epoch" , type=int , default=None , help="If passed, the training will stop after this number of epochs." , )
    parser.add_argument(
        "--num_epochs" , type=int , default=2 , help="Number of train epochs." , )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config , args )
if __name__ == "__main__":
main()
| 143
|
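The resume path above relies on `accelerator.save_state` writing one folder per epoch and the folder name encoding the epoch number. A typical invocation pair, assuming the script is saved as test_checkpointing.py (the filename and output directory are hypothetical):

accelerate launch test_checkpointing.py --num_epochs 2 --output_dir ckpts
accelerate launch test_checkpointing.py --resume_from_checkpoint ckpts/epoch_0 --output_dir ckpts

The first run trains and writes epoch_0/, epoch_1/ plus state_0.json, state_1.json; the second restores model, optimizer, and scheduler state and asserts that the recorded state_0.json matches what was reloaded.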
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = Dict[str, Any]
UpperCamelCase__ = List[Prediction]
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ObjectDetectionPipeline(Pipeline ):
    def __init__(self , *args , **kwargs ):
        super().__init__(*args , **kwargs )
        if self.framework == "tf":
            raise ValueError(f"""The {self.__class__} is only available in PyTorch.""" )
        requires_backends(self , "vision" )
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )
    def _sanitize_parameters(self , **kwargs ):
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        return {}, {}, postprocess_kwargs
    def __call__(self , *args , **kwargs ) -> Union[Predictions, List[Prediction]]:
        return super().__call__(*args , **kwargs )
    def preprocess(self , image ):
        image = load_image(image )
        target_size = torch.IntTensor([[image.height, image.width]] )
        inputs = self.image_processor(images=[image] , return_tensors="pt" )
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs["words"] , boxes=inputs["boxes"] , return_tensors="pt" )
        inputs["target_size"] = target_size
        return inputs
    def _forward(self , model_inputs ):
        target_size = model_inputs.pop("target_size" )
        outputs = self.model(**model_inputs )
        model_outputs = outputs.__class__({"target_size": target_size, **outputs} )
        if self.tokenizer is not None:
            model_outputs["bbox"] = model_inputs["bbox"]
        return model_outputs
    def postprocess(self , model_outputs , threshold=0.9 ):
        target_size = model_outputs["target_size"]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()

            def unnormalize(bbox ):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ] ) )

            scores, classes = model_outputs["logits"].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox ) for bbox in model_outputs["bbox"].squeeze(0 )]
            keys = ["score", "label", "box"]
            annotation = [dict(zip(keys , vals ) ) for vals in zip(scores.tolist() , labels , boxes ) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs , threshold , target_size )
            raw_annotation = raw_annotations[0]
            scores = raw_annotation["scores"]
            labels = raw_annotation["labels"]
            boxes = raw_annotation["boxes"]
            raw_annotation["scores"] = scores.tolist()
            raw_annotation["labels"] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation["boxes"] = [self._get_bounding_box(box ) for box in boxes]
            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys , vals ) )
                for vals in zip(raw_annotation["scores"] , raw_annotation["labels"] , raw_annotation["boxes"] )
            ]
        return annotation
    def _get_bounding_box(self , box ) -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ObjectDetectionPipeline is only available in PyTorch." )
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
"xmin": xmin,
"ymin": ymin,
"xmax": xmax,
"ymax": ymax,
}
return bbox
| 143
| 1
|
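As with the zero-shot pipeline earlier, this class is normally obtained through the transformers factory; a typical call looks like this (the model and image URL are illustrative):

from transformers import pipeline

detector = pipeline("object-detection", model="facebook/detr-resnet-50")
for prediction in detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.9):
    # Each prediction is {"score": float, "label": str, "box": {"xmin", "ymin", "xmax", "ymax"}}
    print(prediction["label"], prediction["score"], prediction["box"])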