import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.encodec")

MAPPING_QUANTIZER = {
    "quantizer.vq.layers.*._codebook.inited": "quantizer.layers.*.codebook.inited",
    "quantizer.vq.layers.*._codebook.cluster_size": "quantizer.layers.*.codebook.cluster_size",
    "quantizer.vq.layers.*._codebook.embed": "quantizer.layers.*.codebook.embed",
    "quantizer.vq.layers.*._codebook.embed_avg": "quantizer.layers.*.codebook.embed_avg",
}
MAPPING_ENCODER = {
    "encoder.model.0.conv.conv": "encoder.layers.0.conv",
    "encoder.model.1.block.1.conv.conv": "encoder.layers.1.block.1.conv",
    "encoder.model.1.block.3.conv.conv": "encoder.layers.1.block.3.conv",
    "encoder.model.1.shortcut.conv.conv": "encoder.layers.1.shortcut.conv",
    "encoder.model.3.conv.conv": "encoder.layers.3.conv",
    "encoder.model.4.block.1.conv.conv": "encoder.layers.4.block.1.conv",
    "encoder.model.4.block.3.conv.conv": "encoder.layers.4.block.3.conv",
    "encoder.model.4.shortcut.conv.conv": "encoder.layers.4.shortcut.conv",
    "encoder.model.6.conv.conv": "encoder.layers.6.conv",
    "encoder.model.7.block.1.conv.conv": "encoder.layers.7.block.1.conv",
    "encoder.model.7.block.3.conv.conv": "encoder.layers.7.block.3.conv",
    "encoder.model.7.shortcut.conv.conv": "encoder.layers.7.shortcut.conv",
    "encoder.model.9.conv.conv": "encoder.layers.9.conv",
    "encoder.model.10.block.1.conv.conv": "encoder.layers.10.block.1.conv",
    "encoder.model.10.block.3.conv.conv": "encoder.layers.10.block.3.conv",
    "encoder.model.10.shortcut.conv.conv": "encoder.layers.10.shortcut.conv",
    "encoder.model.12.conv.conv": "encoder.layers.12.conv",
    "encoder.model.13.lstm": "encoder.layers.13.lstm",
    "encoder.model.15.conv.conv": "encoder.layers.15.conv",
}
MAPPING_ENCODER_48K = {
    "encoder.model.0.conv.norm": "encoder.layers.0.norm",
    "encoder.model.1.block.1.conv.norm": "encoder.layers.1.block.1.norm",
    "encoder.model.1.block.3.conv.norm": "encoder.layers.1.block.3.norm",
    "encoder.model.1.shortcut.conv.norm": "encoder.layers.1.shortcut.norm",
    "encoder.model.3.conv.norm": "encoder.layers.3.norm",
    "encoder.model.4.block.1.conv.norm": "encoder.layers.4.block.1.norm",
    "encoder.model.4.block.3.conv.norm": "encoder.layers.4.block.3.norm",
    "encoder.model.4.shortcut.conv.norm": "encoder.layers.4.shortcut.norm",
    "encoder.model.6.conv.norm": "encoder.layers.6.norm",
    "encoder.model.7.block.1.conv.norm": "encoder.layers.7.block.1.norm",
    "encoder.model.7.block.3.conv.norm": "encoder.layers.7.block.3.norm",
    "encoder.model.7.shortcut.conv.norm": "encoder.layers.7.shortcut.norm",
    "encoder.model.9.conv.norm": "encoder.layers.9.norm",
    "encoder.model.10.block.1.conv.norm": "encoder.layers.10.block.1.norm",
    "encoder.model.10.block.3.conv.norm": "encoder.layers.10.block.3.norm",
    "encoder.model.10.shortcut.conv.norm": "encoder.layers.10.shortcut.norm",
    "encoder.model.12.conv.norm": "encoder.layers.12.norm",
    "encoder.model.15.conv.norm": "encoder.layers.15.norm",
}
MAPPING_DECODER = {
    "decoder.model.0.conv.conv": "decoder.layers.0.conv",
    "decoder.model.1.lstm": "decoder.layers.1.lstm",
    "decoder.model.3.convtr.convtr": "decoder.layers.3.conv",
    "decoder.model.4.block.1.conv.conv": "decoder.layers.4.block.1.conv",
    "decoder.model.4.block.3.conv.conv": "decoder.layers.4.block.3.conv",
    "decoder.model.4.shortcut.conv.conv": "decoder.layers.4.shortcut.conv",
    "decoder.model.6.convtr.convtr": "decoder.layers.6.conv",
    "decoder.model.7.block.1.conv.conv": "decoder.layers.7.block.1.conv",
    "decoder.model.7.block.3.conv.conv": "decoder.layers.7.block.3.conv",
    "decoder.model.7.shortcut.conv.conv": "decoder.layers.7.shortcut.conv",
    "decoder.model.9.convtr.convtr": "decoder.layers.9.conv",
    "decoder.model.10.block.1.conv.conv": "decoder.layers.10.block.1.conv",
    "decoder.model.10.block.3.conv.conv": "decoder.layers.10.block.3.conv",
    "decoder.model.10.shortcut.conv.conv": "decoder.layers.10.shortcut.conv",
    "decoder.model.12.convtr.convtr": "decoder.layers.12.conv",
    "decoder.model.13.block.1.conv.conv": "decoder.layers.13.block.1.conv",
    "decoder.model.13.block.3.conv.conv": "decoder.layers.13.block.3.conv",
    "decoder.model.13.shortcut.conv.conv": "decoder.layers.13.shortcut.conv",
    "decoder.model.15.conv.conv": "decoder.layers.15.conv",
}
MAPPING_DECODER_48K = {
    "decoder.model.0.conv.norm": "decoder.layers.0.norm",
    "decoder.model.3.convtr.norm": "decoder.layers.3.norm",
    "decoder.model.4.block.1.conv.norm": "decoder.layers.4.block.1.norm",
    "decoder.model.4.block.3.conv.norm": "decoder.layers.4.block.3.norm",
    "decoder.model.4.shortcut.conv.norm": "decoder.layers.4.shortcut.norm",
    "decoder.model.6.convtr.norm": "decoder.layers.6.norm",
    "decoder.model.7.block.1.conv.norm": "decoder.layers.7.block.1.norm",
    "decoder.model.7.block.3.conv.norm": "decoder.layers.7.block.3.norm",
    "decoder.model.7.shortcut.conv.norm": "decoder.layers.7.shortcut.norm",
    "decoder.model.9.convtr.norm": "decoder.layers.9.norm",
    "decoder.model.10.block.1.conv.norm": "decoder.layers.10.block.1.norm",
    "decoder.model.10.block.3.conv.norm": "decoder.layers.10.block.3.norm",
    "decoder.model.10.shortcut.conv.norm": "decoder.layers.10.shortcut.norm",
    "decoder.model.12.convtr.norm": "decoder.layers.12.norm",
    "decoder.model.13.block.1.conv.norm": "decoder.layers.13.block.1.norm",
    "decoder.model.13.block.3.conv.norm": "decoder.layers.13.block.3.norm",
    "decoder.model.13.shortcut.conv.norm": "decoder.layers.13.shortcut.norm",
    "decoder.model.15.conv.norm": "decoder.layers.15.norm",
}
MAPPING_24K = {
    **MAPPING_QUANTIZER,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
}
MAPPING_48K = {
    **MAPPING_QUANTIZER,
    **MAPPING_ENCODER,
    **MAPPING_ENCODER_48K,
    **MAPPING_DECODER,
    **MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
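
# How the wildcard mappings above get applied (an illustrative sketch of the logic in
# recursively_load_weights below, not executed by this script): ".*." in a key matches
# any layer index, which is recovered from the original parameter name and substituted
# into the mapped pattern.
#
#     key            = "quantizer.vq.layers.*._codebook.embed"
#     name           = "quantizer.vq.layers.3._codebook.embed"
#     prefix, suffix = key.split(".*.")   # "quantizer.vq.layers", "_codebook.embed"
#     layer_index    = "3"                # parsed out of `name`
#     mapped_key     = "quantizer.layers.*.codebook.embed".replace("*", layer_index)
#     # -> "quantizer.layers.3.codebook.embed"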
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
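
# Behavior sketch for should_ignore (hypothetical keys, for illustration only):
#
#     should_ignore("decoder.model.0.conv.bias", ["decoder.*"])                  # True: prefix wildcard
#     should_ignore("encoder.model.1.block.1.conv.weight", ["encoder.*.block"])  # True: prefix and suffix both match
#     should_ignore("quantizer.vq.layers.0._codebook.embed", ["lstm"])           # False: no rule matches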
def recursively_load_weights(orig_dict, hf_model, model_name):
    unused_weights = []

    if model_name in ("encodec_24khz", "encodec_32khz"):
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(f"Unsupported model: {model_name}")

    for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix, suffix = key.split(".*.")
                if prefix in name and suffix in name:
                    key = suffix

            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith("embed") and name.endswith("embed_avg"):
                    continue

                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key)[0].split(".")[-2]
                    mapped_key = mapped_key.replace("*", layer_index)
                if "weight_g" in name:
                    weight_type = "weight_g"
                elif "weight_v" in name:
                    weight_type = "weight_v"
                elif "weight_ih_l0" in name:
                    weight_type = "weight_ih_l0"
                elif "weight_hh_l0" in name:
                    weight_type = "weight_hh_l0"
                elif "bias_ih_l0" in name:
                    weight_type = "bias_ih_l0"
                elif "bias_hh_l0" in name:
                    weight_type = "bias_hh_l0"
                elif "weight_ih_l1" in name:
                    weight_type = "weight_ih_l1"
                elif "weight_hh_l1" in name:
                    weight_type = "weight_hh_l1"
                elif "bias_ih_l1" in name:
                    weight_type = "bias_ih_l1"
                elif "bias_hh_l1" in name:
                    weight_type = "bias_hh_l1"
                elif "bias" in name:
                    weight_type = "bias"
                elif "weight" in name:
                    weight_type = "weight"
                elif "running_mean" in name:
                    weight_type = "running_mean"
                elif "running_var" in name:
                    weight_type = "running_var"
                elif "num_batches_tracked" in name:
                    weight_type = "num_batches_tracked"
                else:
                    weight_type = None
                set_recursively(hf_model, mapped_key, value, name, weight_type)
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
@torch.no_grad()
def convert_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path)
    else:
        config = EncodecConfig()

    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32_000
        config.codebook_size = 2048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48_000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = "time_group_norm"
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f"Unknown model name: {model_name}")

    model = EncodecModel(config)

    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels,
        sampling_rate=config.sampling_rate,
        chunk_length_s=config.chunk_length_s,
        overlap=config.overlap,
    )
    feature_extractor.save_pretrained(pytorch_dump_folder_path)

    original_checkpoint = torch.load(checkpoint_path)
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint["best_state"]
    recursively_load_weights(original_checkpoint, model, model_name)
    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model",
        default="encodec_24khz",
        type=str,
        help="The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.",
    )
    parser.add_argument("--checkpoint_path", required=True, type=str, help="Path to original checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )

    args = parser.parse_args()
    convert_checkpoint(
        args.model,
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
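
# Example invocation (a sketch: the script filename and local paths are assumptions;
# the checkpoint should be one of the files listed in the comment block at the top):
#
#     python convert_encodec_checkpoint_to_pytorch.py \
#         --model encodec_24khz \
#         --checkpoint_path ./encodec_24khz-d7cc33bc.th \
#         --pytorch_dump_folder_path ./encodec-24khz-hf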
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_falcon"] = [
        "FALCON_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FalconForCausalLM",
        "FalconModel",
        "FalconPreTrainedModel",
        "FalconForSequenceClassification",
        "FalconForTokenClassification",
        "FalconForQuestionAnswering",
    ]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
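
# Minimal usage sketch (assumes torch is installed): with the lazy module in place,
# modeling code is only imported on first attribute access.
#
#     from transformers import FalconConfig, FalconModel
#     config = FalconConfig()        # default hyperparameters
#     model = FalconModel(config)    # modeling_falcon is materialized here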
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
    RemBertTokenizer = None

logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
    "tokenizer_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}

SPIECE_UNDERLINE = "▁"
class RemBertTokenizerFast(PreTrainedTokenizerFast):
    """A "fast" RemBERT tokenizer, backed by HuggingFace's tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, remove_space=True,
                 keep_accents=False, bos_token="[CLS]", eos_token="[SEP]", unk_token="<unk>", sep_token="[SEP]",
                 pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]", **kwargs):
        # Mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space,
            keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token,
            sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
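
# Usage sketch (the checkpoint name comes from PRETRAINED_VOCAB_FILES_MAP above):
#
#     tokenizer = RemBertTokenizerFast.from_pretrained("google/rembert")
#     encoded = tokenizer("Hello world")
#     # encoded["input_ids"] starts with the [CLS] id and ends with the [SEP] id,
#     # matching build_inputs_with_special_tokens above.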
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class VisionTextDualEncoderProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.5, 0.5, 0.5],
            "image_std": [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images from random uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with self.assertRaises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
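
# To run this suite in isolation (the test-file path is an assumption about the
# repository layout, not stated in this file):
#
#     python -m pytest tests/models/vision_text_dual_encoder/test_processor_vision_text_dual_encoder.py -q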
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    def __init__(self, parent, batch_size=13, image_size=32, num_channels=3, num_stages=4,
                 hidden_sizes=[10, 20, 30, 40], depths=[2, 2, 3, 2], is_training=True, use_labels=True,
                 intermediate_size=37, hidden_act="gelu", type_sequence_label_size=10, initializer_range=0.02,
                 out_features=["stage2", "stage3", "stage4"], num_labels=3, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope
        self.num_hidden_layers = num_stages
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels
    def get_backbone_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels, num_stages=self.num_stages, hidden_sizes=self.hidden_sizes,
            depths=self.depths, is_training=self.is_training, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, out_features=self.out_features,
        )
    def get_config(self):
        return UperNetConfig(
            backbone_config=self.get_backbone_config(), hidden_size=512, pool_scales=[1, 2, 3, 6],
            use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_in_channels=40, auxiliary_channels=256,
            auxiliary_num_convs=1, auxiliary_concat_input=False, loss_ignore_index=255, num_labels=self.num_labels,
        )
    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False

    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
    @unittest.skip(reason="UperNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="UperNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_to_base(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
    @unittest.skip(reason="UperNet does not have tied weights")
    def test_tied_model_weights_key_ignore(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg"
    )
    image = Image.open(filepath).convert("RGB")
    return image
@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402

# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, "src", "transformers")

DUMMY_CONSTANT = """
{0} = None
"""

DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})
"""

DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
"""
class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        no_backend = find_backend('    _import_structure["models.albert"].append("AlbertTokenizerFast")')
        self.assertIsNone(no_backend)

        simple_backend = find_backend("    if not is_tokenizers_available():")
        self.assertEqual(simple_backend, "tokenizers")

        backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_sentencepiece_available() and is_tokenizers_available()):")
        self.assertEqual(double_backend, "sentencepiece_and_tokenizers")

        double_backend_with_underscore = find_backend(
            "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        )
        self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):"
        )
        self.assertEqual(triple_backend, "sentencepiece_and_tokenizers_and_vision")
    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("tensorflow_text", objects)
        self.assertIn("sentencepiece_and_tokenizers", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertModel", objects["tf"])
        self.assertIn("FlaxBertModel", objects["flax"])
        self.assertIn("TFBertTokenizer", objects["tensorflow_text"])
        self.assertIn("convert_slow_tokenizer", objects["sentencepiece_and_tokenizers"])
    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = "\nclass FakeClass(metaclass=DummyObject):\n    _backends = 'torch'\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, 'torch')\n"
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)
    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n    requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n    _backends = [\"torch\"]\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, [\"torch\"])\n"
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
def manhattan_distance(point_a: list, point_b: list) -> float:
    """Return the Manhattan (taxicab) distance between two points in the same n-dimensional space."""
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list[float]) -> None:
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    """Identical to manhattan_distance, written as a single expression."""
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))
if __name__ == "__main__":
import doctest
doctest.testmod()
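
# Quick worked examples, consistent with the validation rules above:
#
#     >>> manhattan_distance([1, 1], [2, 2])
#     2.0
#     >>> manhattan_distance_one_liner([1.5, 2], [3, 4])
#     3.5
#     >>> manhattan_distance([1, 1], [2, 2, 2])
#     Traceback (most recent call last):
#         ...
#     ValueError: Both points must be in the same n-dimensional space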
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, base_model=False):
    rename_keys = []
# fmt: off
# stem:
rename_keys.append(('cls_token', 'vit.embeddings.cls_token') )
rename_keys.append(('pos_embed', 'vit.embeddings.position_embeddings') )
rename_keys.append(('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias') )
# backbone
rename_keys.append(('patch_embed.backbone.stem.conv.weight', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight') )
rename_keys.append(('patch_embed.backbone.stem.norm.weight', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight') )
rename_keys.append(('patch_embed.backbone.stem.norm.bias', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias') )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias""") )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
# fmt: on
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak the timm model's weights into our ViT hybrid structure."""
    # define default ViT hybrid configuration
    backbone_config = BitConfig(
        global_padding="same",
        layer_type="bottleneck",
        depths=(3, 4, 9),
        out_features=["stage3"],
        embedding_dynamic_padding=True,
    )
    config = ViTHybridConfig(backbone_config=backbone_config, image_size=384, num_labels=1000)
    base_model = False

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config).eval()
    else:
        model = ViTHybridForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = ViTHybridImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Predicted class:", logits.argmax(-1).item())
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor to the hub {vit_name}")
        model.push_to_hub(f"ybelkada/{vit_name}")
        processor.push_to_hub(f"ybelkada/{vit_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_r50_s16_384',
type=str,
help='Name of the hybrid ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.'
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
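
# Example invocation (a sketch: the script filename and dump folder are assumptions):
#
#     python convert_vit_hybrid_timm_to_pytorch.py \
#         --vit_name vit_base_r50_s16_384 \
#         --pytorch_dump_folder_path ./vit-hybrid-base-bit-384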
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
logger = logging.getLogger(__name__)


class GLUETransformer(BaseTransformer):
    mode = "sequence-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]
        super().__init__(hparams, num_labels, self.mode)
    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        loss = outputs[0]

        lr_scheduler = self.trainer.lr_schedulers[0]["scheduler"]
        tensorboard_logs = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}
    def prepare_data(self):
        """Called to initialize data. Use the call to construct features."""
        args = self.hparams
        processor = processors[args.task]()
        self.labels = processor.get_labels()

        for mode in ["train", "dev"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = (
                    processor.get_dev_examples(args.data_dir)
                    if mode == "dev"
                    else processor.get_train_examples(args.data_dir)
                )
                features = convert_examples_to_features(
                    examples,
                    self.tokenizer,
                    max_length=args.max_seq_length,
                    label_list=self.labels,
                    output_mode=args.glue_output_mode,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)
    def get_dataloader(self, mode, batch_size, shuffle=False):
        """Load a cached dataset split. We evaluate on the dev set to avoid submitting to the GLUE server."""
        mode = "dev" if mode == "test" else mode

        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        if self.hparams.glue_output_mode == "classification":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
        elif self.hparams.glue_output_mode == "regression":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.float)

        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels),
            batch_size=batch_size,
            shuffle=shuffle,
        )
    def validation_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()

        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
    def _eval_end(self, outputs):
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean().detach().cpu().item()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)

        if self.hparams.glue_output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif self.hparams.glue_output_mode == "regression":
            preds = np.squeeze(preds)

        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]

        results = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task, preds, out_label_ids)}

        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list
    def validation_epoch_end(self, outputs):
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs):
        ret, predictions, targets = self._eval_end(outputs)
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument("--task", default="", type=str, required=True, help="The GLUE task to run")
        parser.add_argument(
            "--gpus", default=0, type=int, help="The number of GPUs allocated for this, it is by default 0 meaning none"
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )
        return parser
def main():
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            "./results",
            f"{args.task}_{time.strftime('%Y%m%d_%H%M%S')}",
        )
        os.makedirs(args.output_dir)

    model = GLUETransformer(args)
    trainer = generic_train(model, args)

    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)
if __name__ == "__main__":
main()
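# Example invocation (illustrative; the exact flag set comes from `add_generic_args`
# and `BaseTransformer.add_model_specific_args`, which are defined elsewhere):
#   python run_glue.py --task mrpc --model_name_or_path bert-base-cased \
#       --output_dir ./results/mrpc --do_train --do_predict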
import argparse
import os
import re
import packaging.version
SCREAMING_SNAKE_CASE_ = """examples/"""
SCREAMING_SNAKE_CASE_ = {
"""examples""": (re.compile(r"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""),
"""init""": (re.compile(r"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""),
"""setup""": (re.compile(r"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), r"""\1version=\"VERSION\","""),
"""doc""": (re.compile(r"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""),
}
SCREAMING_SNAKE_CASE_ = {
"""init""": """src/diffusers/__init__.py""",
"""setup""": """setup.py""",
}
SCREAMING_SNAKE_CASE_ = """README.md"""
def update_version_in_file(fname, version, pattern):
    """Update the version in one file using a specific pattern."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
def update_version_in_examples(version):
    """Update the version in all examples files."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")
def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    # If the introduction or the conclusion of the list change, the prompts may need to be updated.
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/diffusers/main/model_doc",
                "https://huggingface.co/docs/diffusers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def get_version() -> packaging.version.Version:
    """Reads the current version in the __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    # print("Cleaning main README, don't forget to run `make fix-copies`.")
    # clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
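# Typical invocations implied by the flags above:
#   python release.py                 # pre-release: pick the next minor version
#   python release.py --patch         # pre-release for a patch version
#   python release.py --post_release  # post-release: bump to the next .dev0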
from __future__ import annotations
class BoyerMooreSearch:
    """Searches `pattern` in `text` using the bad-character heuristic."""

    def __init__(self, text: str, pattern: str):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Return the rightmost index of `char` in the pattern, or -1 if absent."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """Return the rightmost mismatch position for the window starting at
        `current_pos`, or -1 if the whole pattern matches there."""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions
text = "ABAABA"
pattern = "AB"

bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()

if len(positions) == 0:
    print("No match found")
else:
    print("Pattern found in following positions: ")
    print(positions)
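# For this input the script prints "Pattern found in following positions:"
# followed by [0, 3], since "AB" occurs at indices 0 and 3 of "ABAABA".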
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format='''%(message)s''')
def column_reshape(input_array: np.ndarray) -> np.ndarray:
    """Reshape a row Numpy array into a column Numpy array."""
    return input_array.reshape((input_array.size, 1))
def covariance_within_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Compute the covariance matrix inside each class."""
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        data_mean = data.mean(1)
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += np.dot(centered_data, centered_data.T)
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data, centered_data.T)
    return covariance_sum / features.shape[1]
def covariance_between_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Compute the covariance matrix between multiple classes."""
    general_data_mean = features.mean(1)
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        device_data = data.shape[1]
        data_mean = data.mean(1)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
    return covariance_sum / features.shape[1]
def principal_component_analysis(features: np.ndarray, dimensions: int) -> np.ndarray:
    """Principal Component Analysis. The dataset must be column-wise:
    each column is a sample, each row a feature."""
    # Check if the features have been loaded
    if features.any():
        data_mean = features.mean(1)
        # Center the dataset
        centered_data = features - np.reshape(data_mean, (data_mean.size, 1))
        covariance_matrix = np.dot(centered_data, centered_data.T) / features.shape[1]
        _, eigenvectors = np.linalg.eigh(covariance_matrix)
        # Take all the columns in the reverse order (-1), and then takes only the first
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the database on the new space
        projected_data = np.dot(filtered_eigenvectors.T, features)
        logging.info("Principal Component Analysis computed")
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError
def linear_discriminant_analysis(
    features: np.ndarray, labels: np.ndarray, classes: int, dimensions: int
) -> np.ndarray:
    """Linear Discriminant Analysis. The number of target dimensions must be
    strictly smaller than the number of classes."""
    assert classes > dimensions

    # Check if features have been already loaded
    if features.any():
        _, eigenvectors = eigh(
            covariance_between_classes(features, labels, classes),
            covariance_within_classes(features, labels, classes),
        )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors)
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T, features)
        logging.info("Linear Discriminant Analysis computed")
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError
def test_linear_discriminant_analysis() -> None:
    # Create dummy dataset with 2 classes and 3 features
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
    labels = np.array([0, 0, 0, 1, 1])
    classes = 2
    dimensions = 2

    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError) as error_info:
        projected_data = linear_discriminant_analysis(features, labels, classes, dimensions)
        if isinstance(projected_data, np.ndarray):
            raise AssertionError("Did not raise AssertionError for dimensions > classes")
    assert error_info.type is AssertionError
def test_principal_component_analysis() -> None:
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    dimensions = 2
    expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]])

    with pytest.raises(AssertionError) as error_info:
        output = principal_component_analysis(features, dimensions)
        if not np.allclose(expected_output, output):
            raise AssertionError
    assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
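# Minimal usage sketch (illustrative numbers, not part of the tests above).
# Features are stored column-wise: each column is one sample.
#   demo = np.array([[1.0, 2.0, 3.0, 4.0], [2.0, 4.0, 6.0, 8.0]])  # 2 features x 4 samples
#   reduced = principal_component_analysis(demo, dimensions=1)     # shape (1, 4)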
import operator as op
def solve(post_fix):
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation

    # print table header
    print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
    print("-" * (30 + len(post_fix)))

    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # append x to stack
            # output in tabular format
            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")

            a = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")

            # evaluate the 2 values popped from stack & push result to stack
            stack.append(str(opr[x](int(a), int(b))))
            # output in tabular format
            print(x.rjust(8), ("push(" + a + x + b + ")").ljust(12), ",".join(stack), sep=" | ")

    return int(stack[0])
if __name__ == "__main__":
    Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
    print("\n\tResult = ", solve(Postfix))
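# Worked example: the postfix expression "5 6 9 * +" evaluates to 5 + (6 * 9) = 59.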
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append('''dataclasses''')
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append('''importlib_metadata''')
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
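# Usage sketch: dep_version_check("tqdm") raises if the installed tqdm does not
# satisfy the version range pinned in dependency_versions_table.py.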
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/pegasus-large")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "</s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "</s>")
        self.assertEqual(vocab_keys[-1], "v")
        self.assertEqual(len(vocab_keys), 1103)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1103)
    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"
            " </s> <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)
    def test_large_mask_tokens(self):
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = "<mask_1> To ensure a <mask_2> flow of bank resolutions."
        desired_result = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
    def test_large_tokenizer_settings(self):
        tokenizer = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 96103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1024
        raw_input_str = "To ensure a smooth flow of bank resolutions."
        desired_result = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3]) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 150, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 1024)
        assert batch.attention_mask.shape == (2, 1024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
a__ : Tuple = {"input_ids": [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/bigbird-pegasus-large-arxiv",
            revision="ba85d0851d708441f91440d509690f1ab6353415",
        )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token="[MASK]")
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")
    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"
            " <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)
    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 1000, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 4096)
        assert batch.attention_mask.shape == (2, 4096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.
    def test_equivalence_to_orig_tokenizer(self):
        test_str = (
            "This is an example string that is used to test the original TF implementation against the HF"
            " implementation"
        )

        token_ids = self._large_tokenizer(test_str).input_ids

        self.assertListEqual(
            token_ids,
            [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1],
        )
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def mobius(number: int) -> int:
    """Mobius function of `number`: 0 if `number` is not square-free,
    otherwise (-1) ** (count of prime factors)."""
    factors = prime_factors(number)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0
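# Examples: mobius(15) == 1 (two prime factors), mobius(70) == -1 (three),
# mobius(24) == 0 (24 is divisible by the square 4).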
if __name__ == "__main__":
import doctest
doctest.testmod()
import os
import zipfile
import pytest
from datasets.utils.extract import (
    Bzip2Extractor,
    Extractor,
    GzipExtractor,
    Lz4Extractor,
    SevenZipExtractor,
    TarExtractor,
    XzExtractor,
    ZipExtractor,
    ZstdExtractor,
)

from .utils import require_lz4, require_py7zr, require_zstandard
@pytest.mark.parametrize(
'''compression_format, is_archive''' , [
('''7z''', True),
('''bz2''', False),
('''gzip''', False),
('''lz4''', False),
('''tar''', True),
('''xz''', False),
('''zip''', True),
('''zstd''', False),
] , )
def test_base_extractors(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    text_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
):
    input_paths_and_base_extractors = {
        "7z": (seven_zip_file, SevenZipExtractor),
        "bz2": (bz2_file, Bzip2Extractor),
        "gzip": (gz_file, GzipExtractor),
        "lz4": (lz4_file, Lz4Extractor),
        "tar": (tar_file, TarExtractor),
        "xz": (xz_file, XzExtractor),
        "zip": (zip_file, ZipExtractor),
        "zstd": (zstd_file, ZstdExtractor),
    }
    input_path, base_extractor = input_paths_and_base_extractors[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    assert base_extractor.is_extractable(input_path)
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    base_extractor.extract(input_path, output_path)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
'''compression_format, is_archive''' , [
('''7z''', True),
('''bz2''', False),
('''gzip''', False),
('''lz4''', False),
('''tar''', True),
('''xz''', False),
('''zip''', True),
('''zstd''', False),
] , )
def test_extractor(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    text_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
):
    input_paths = {
        "7z": seven_zip_file,
        "bz2": bz2_file,
        "gzip": gz_file,
        "lz4": lz4_file,
        "tar": tar_file,
        "xz": xz_file,
        "zip": zip_file,
        "zstd": zstd_file,
    }
    input_path = input_paths[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    extractor_format = Extractor.infer_extractor_format(input_path)
    assert extractor_format is not None
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    Extractor.extract(input_path, output_path, extractor_format)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content
@pytest.fixture
def tar_file_with_dot_dot(tmp_path, text_file):
    import tarfile

    directory = tmp_path / "data_dot_dot"
    directory.mkdir()
    path = directory / "tar_file_with_dot_dot.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.join("..", text_file.name))
    return path


@pytest.fixture
def tar_file_with_sym_link(tmp_path):
    import tarfile

    directory = tmp_path / "data_sym_link"
    directory.mkdir()
    path = directory / "tar_file_with_sym_link.tar"
    os.symlink("..", directory / "subdir", target_is_directory=True)
    with tarfile.TarFile(path, "w") as f:
        f.add(str(directory / "subdir"), arcname="subdir")  # str required by os.readlink on Windows and Python < 3.8
    return path
@pytest.mark.parametrize(
'''insecure_tar_file, error_log''' , [('''tar_file_with_dot_dot''', '''illegal path'''), ('''tar_file_with_sym_link''', '''Symlink''')] , )
def test_tar_extract_insecure_files(
    insecure_tar_file, error_log, tar_file_with_dot_dot, tar_file_with_sym_link, tmp_path, caplog
):
    insecure_tar_files = {
        "tar_file_with_dot_dot": tar_file_with_dot_dot,
        "tar_file_with_sym_link": tar_file_with_sym_link,
    }
    insecure_tar_file = insecure_tar_files[insecure_tar_file]
    output_path = tmp_path / "extracted"
    TarExtractor.extract(insecure_tar_file, output_path)
    assert caplog.text
    for record in caplog.records:
        assert record.levelname == "ERROR"
        assert error_log in record.msg
def test_is_zipfile_false_positive(tmpdir):
    # We should have less false positives than zipfile.is_zipfile
    # We do that by checking only the magic number
    not_a_zip_file = tmpdir / "not_a_zip_file"
    # From: https://github.com/python/cpython/pull/5053
    data = (
        b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"
        b"\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"
        b"DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"
        b"\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"
    )
    with not_a_zip_file.open("wb") as f:
        f.write(data)
    assert zipfile.is_zipfile(str(not_a_zip_file))  # is a false positive for `zipfile`
    assert not ZipExtractor.is_extractable(not_a_zip_file)  # but we're right
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)
def string_to_bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )
def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    """Creates a mapping function from each choice's string representation to the actual value."""
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)
def HfArg(
    *,
    aliases: Union[str, List[str]] = None,
    help: str = None,
    default: Any = dataclasses.MISSING,
    default_factory: Callable[[], Any] = dataclasses.MISSING,
    metadata: dict = None,
    **kwargs,
) -> dataclasses.Field:
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help

    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
class HfArgumentParser(ArgumentParser):
    """
    This subclass of `argparse.ArgumentParser` uses type hints on dataclasses to generate arguments.
    """

    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)
    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(origin_type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]

            kwargs["type"] = make_choice_type_function(kwargs["choices"])

            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)

            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)
    def _add_dataclass_arguments(self, dtype: DataClassType):
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self

        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)"
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`."
                ) from ex
            raise

        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)
    def parse_args_into_dataclasses(
        self,
        args=None,
        return_remaining_strings=False,
        look_for_args_file=True,
        args_filename=None,
        args_file_flag=None,
    ) -> Tuple[DataClass, ...]:
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []

            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))

            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")

                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)

                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]

        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")

            return (*outputs,)
    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)
    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)

    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
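# Usage sketch (`DemoArguments` is a hypothetical dataclass, shown only for illustration):
#
#     @dataclasses.dataclass
#     class DemoArguments:
#         learning_rate: float = HfArg(default=1e-4, help="Peak learning rate.")
#         fp16: bool = False
#
#     demo_parser = HfArgumentParser(DemoArguments)
#     (demo_args,) = demo_parser.parse_args_into_dataclasses(args=["--learning_rate", "3e-5"])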
class Node:
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val
def inorder(root, res):
    # Recursive in-order traversal appending values to `res`
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    # Build BST
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
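# Note: tree sort runs in O(n log n) on average, but degrades to O(n^2) on
# already-sorted input because the unbalanced BST turns into a linked list.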
def perfect(number: int) -> bool:
    """Check whether `number` equals the sum of its proper divisors."""
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number
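# The first perfect numbers are 6, 28, 496 and 8128 (e.g. 6 = 1 + 2 + 3).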
if __name__ == "__main__":
print("Program to check whether a number is a Perfect number or not...")
UpperCAmelCase : Tuple = int(input("Enter number: ").strip())
print(f'''{number} is {'' if perfect(number) else 'not '}a Perfect Number.''')
INSTALL_CONTENT = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
import gc
import threading
import time
import psutil
import torch
class PeakCPUMemory:
    def __init__(self):
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor(self):
        self.cpu_memory_peak = -1

        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak)

            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start(self):
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()

    def stop(self):
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak
cpu_peak_tracker = PeakCPUMemory()


def start_measure():
    # Time
    measures = {"time": time.time()}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()

    return measures
def end_measure(start_measures):
    # Time
    measures = {"time": time.time() - start_measures["time"]}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem (in MiB)
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20

    # GPU mem (in MiB)
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20

    return measures
def log_measures(measures, description):
    print(f"{description}:")
    print(f"- Time: {measures['time']:.2f}s")
    for i in range(torch.cuda.device_count()):
        print(f"- GPU {i} allocated: {measures[str(i)]:.2f}MiB")
        peak = measures[f"{i}-peak"]
        print(f"- GPU {i} peak: {peak:.2f}MiB")
    print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB")
    print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB")
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self):
        self.head = None

    def print_list(self):
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    # adding nodes
    def push(self, new_data: Any):
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    # swapping nodes
    def swap_nodes(self, node_data_1, node_data_2):
        if node_data_1 == node_data_2:
            return
        else:
            node_1 = self.head
            while node_1 is not None and node_1.data != node_data_1:
                node_1 = node_1.next

            node_2 = self.head
            while node_2 is not None and node_2.data != node_data_2:
                node_2 = node_2.next

            if node_1 is None or node_2 is None:
                return

            node_1.data, node_2.data = node_2.data, node_1.data
if __name__ == "__main__":
    ll = LinkedList()
    for i in range(5, 0, -1):
        ll.push(i)

    ll.print_list()

    ll.swap_nodes(1, 4)
    print("After swapping")
    ll.print_list()
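# Expected output: "1 2 3 4 5", then "4 2 3 1 5" after swapping nodes 1 and 4.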
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
logger = logging.get_logger(__name__)
def load_pytorch_checkpoint_in_flax_state_dict(
    flax_model, pytorch_checkpoint_path, is_sharded, allow_missing_keys=False
):
    """Load pytorch checkpoints in a flax model"""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    if not is_sharded:
        pt_path = os.path.abspath(pytorch_checkpoint_path)
        logger.info(f"Loading PyTorch weights from {pt_path}")

        pt_state_dict = torch.load(pt_path, map_location="cpu")
        logger.info(f"PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values()):,} parameters.")

        flax_state_dict = convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model)
    else:
        # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
        flax_state_dict = convert_pytorch_sharded_state_dict_to_flax(pytorch_checkpoint_path, flax_model)
    return flax_state_dict
def rename_key_and_reshape_tensor(
    pt_tuple_key: Tuple[str],
    pt_tensor: np.ndarray,
    random_flax_state_dict: Dict[str, jnp.ndarray],
    model_prefix: str,
) -> (Tuple[str], np.ndarray):
    """Rename PT weight names to corresponding Flax weight names and reshape tensor if necessary"""

    def is_key_or_prefix_key_in_dict(key: Tuple[str]) -> bool:
        """Checks if `key` or `(model_prefix,) + key` is in random_flax_state_dict"""
        return len(set(random_flax_state_dict) & {key, (model_prefix,) + key}) > 0

    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer mean
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("mean",)
    if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer var
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("var",)
    if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
    if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
    name = None
    if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
        name = pt_tuple_key[-2] + "_g"
    elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
        name = pt_tuple_key[-2] + "_v"
    if name is not None:
        renamed_pt_tuple_key = pt_tuple_key[:-3] + (name,)
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model):
    # convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    model_prefix = flax_model.base_model_prefix

    # use params dict if the model contains batch norm layers
    if "params" in flax_model.params:
        flax_model_params = flax_model.params["params"]
    else:
        flax_model_params = flax_model.params
    random_flax_state_dict = flatten_dict(flax_model_params)

    # add batch_stats keys,values to dict
    if "batch_stats" in flax_model.params:
        flax_batch_stats = flatten_dict(flax_model.params["batch_stats"])
        random_flax_state_dict.update(flax_batch_stats)

    flax_state_dict = {}

    load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
        model_prefix in {k.split(".")[0] for k in pt_state_dict.keys()}
    )
    load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
        model_prefix not in {k.split(".")[0] for k in pt_state_dict.keys()}
    )

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        pt_tuple_key = tuple(pt_key.split("."))

        # remove base model prefix if necessary
        has_base_model_prefix = pt_tuple_key[0] == model_prefix
        if load_model_with_head_into_base_model and has_base_model_prefix:
            pt_tuple_key = pt_tuple_key[1:]

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(
            pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix
        )

        # add model prefix if necessary
        require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
        if load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key = (model_prefix,) + flax_key

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # add batch stats if the model contains batchnorm layers
        if "batch_stats" in flax_model.params:
            if "mean" in flax_key[-1] or "var" in flax_key[-1]:
                flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                continue
            # remove num_batches_tracked key
            if "num_batches_tracked" in flax_key[-1]:
                flax_state_dict.pop(flax_key, None)
                continue

            # also add unexpected weight so that warning is thrown
            flax_state_dict[("params",) + flax_key] = jnp.asarray(flax_tensor)

        else:
            # also add unexpected weight so that warning is thrown
            flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
def convert_pytorch_sharded_state_dict_to_flax(shard_filenames, flax_model):
    import torch

    # Load the index
    flax_state_dict = {}
    for shard_file in shard_filenames:
        # load using msgpack utils
        pt_state_dict = torch.load(shard_file)
        pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

        model_prefix = flax_model.base_model_prefix

        # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
        if "batch_stats" in flax_model.params:
            flax_model_params = flax_model.params["params"]

            random_flax_state_dict = flatten_dict(flax_model_params)
            random_flax_state_dict.update(flatten_dict(flax_model.params["batch_stats"]))
        else:
            flax_model_params = flax_model.params
            random_flax_state_dict = flatten_dict(flax_model_params)

        load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
            model_prefix in {k.split(".")[0] for k in pt_state_dict.keys()}
        )
        load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
            model_prefix not in {k.split(".")[0] for k in pt_state_dict.keys()}
        )
        # Need to change some parameters name to match Flax names
        for pt_key, pt_tensor in pt_state_dict.items():
            pt_tuple_key = tuple(pt_key.split("."))

            # remove base model prefix if necessary
            has_base_model_prefix = pt_tuple_key[0] == model_prefix
            if load_model_with_head_into_base_model and has_base_model_prefix:
                pt_tuple_key = pt_tuple_key[1:]

            # Correctly rename weight parameters
            flax_key, flax_tensor = rename_key_and_reshape_tensor(
                pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix
            )
            # add model prefix if necessary
            require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
            if load_base_model_into_model_with_head and require_base_model_prefix:
                flax_key = (model_prefix,) + flax_key

            if flax_key in random_flax_state_dict:
                if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                    raise ValueError(
                        f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                        f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                    )

            # add batch stats if the model contains batchnorm layers
            if "batch_stats" in flax_model.params:
                if "mean" in flax_key[-1]:
                    flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                    continue
                if "var" in flax_key[-1]:
                    flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                    continue
                # remove num_batches_tracked key
                if "num_batches_tracked" in flax_key[-1]:
                    flax_state_dict.pop(flax_key, None)
                    continue

                # also add unexpected weight so that warning is thrown
                flax_state_dict[("params",) + flax_key] = jnp.asarray(flax_tensor)
            else:
                # also add unexpected weight so that warning is thrown
                flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)
def load_flax_checkpoint_in_pytorch_model(model, flax_checkpoint_path):
    """Load flax checkpoints in a PyTorch model"""
    flax_checkpoint_path = os.path.abspath(flax_checkpoint_path)
    logger.info(f"Loading Flax weights from {flax_checkpoint_path}")

    # import correct flax class
    flax_cls = getattr(transformers, "Flax" + model.__class__.__name__)

    # load flax weight dict
    with open(flax_checkpoint_path, "rb") as state_f:
        try:
            flax_state_dict = from_bytes(flax_cls, state_f.read())
        except UnpicklingError:
            raise EnvironmentError(f"Unable to convert {flax_checkpoint_path} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(model, flax_state_dict)
def _lowerCamelCase (__lowerCamelCase : str , __lowerCamelCase : Tuple ) -> Dict:
try:
import torch # noqa: F401
except ImportError:
logger.error(
"Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see"
" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
" instructions." )
raise
# check if we have bf16 weights
a__ = flatten_dict(jax.tree_util.tree_map(lambda __lowerCamelCase : x.dtype == jnp.bfloataa , __lowerCamelCase ) ).values()
if any(__lowerCamelCase ):
# convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
"Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
"before loading those in PyTorch model." )
a__ = jax.tree_util.tree_map(
lambda __lowerCamelCase : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , __lowerCamelCase )
a__ = flatten_dict(__lowerCamelCase )
a__ = pt_model.state_dict()
a__ = (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split("." )[0] for k in pt_model_dict.keys()}
)
a__ = (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split("." )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
a__ = []
a__ = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
a__ = flax_key_tuple[0] == pt_model.base_model_prefix
a__ = ".".join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
a__ = flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
a__ = (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(__lowerCamelCase ) not in pt_model_dict:
# conv layer
a__ = flax_key_tuple[:-1] + ("weight",)
a__ = jnp.transpose(__lowerCamelCase , (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(__lowerCamelCase ) not in pt_model_dict:
# linear layer
a__ = flax_key_tuple[:-1] + ("weight",)
a__ = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
a__ = flax_key_tuple[:-1] + ("weight",)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
a__ = flax_key_tuple[:-1] + ("running_mean",)
elif "var" in flax_key_tuple[-1]:
a__ = flax_key_tuple[:-1] + ("running_var",)
if "batch_stats" in flax_state:
a__ = ".".join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
a__ = ".".join(__lowerCamelCase )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
a__ = {}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
a__ = key.split("." )
a__ = None
if key_components[-3::2] == ["parametrizations", "original0"]:
a__ = key_components[-2] + "_g"
elif key_components[-3::2] == ["parametrizations", "original1"]:
a__ = key_components[-2] + "_v"
if name is not None:
a__ = key_components[:-3] + [name]
a__ = ".".join(__lowerCamelCase )
a__ = key
if flax_key in special_pt_names:
a__ = special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
f'''Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '''
f'''to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
else:
# add weight to pytorch dict
a__ = np.asarray(__lowerCamelCase ) if not isinstance(__lowerCamelCase , np.ndarray ) else flax_tensor
a__ = torch.from_numpy(__lowerCamelCase )
# remove from missing keys
missing_keys.remove(__lowerCamelCase )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(__lowerCamelCase )
pt_model.load_state_dict(__lowerCamelCase )
# re-transform missing_keys to list
a__ = list(__lowerCamelCase )
if len(__lowerCamelCase ) > 0:
logger.warning(
"Some weights of the Flax model were not used when initializing the PyTorch model"
f''' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'''
f''' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'''
" (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
f''' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'''
" to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
" FlaxBertForSequenceClassification model)." )
else:
logger.warning(f'''All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n''' )
if len(__lowerCamelCase ) > 0:
logger.warning(
f'''Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'''
f''' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'''
" use it for predictions and inference." )
else:
logger.warning(
f'''All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n'''
"If your task is similar to the task the model of the checkpoint was trained on, "
f'''you can already use {pt_model.__class__.__name__} for predictions without further training.''' )
return pt_model
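# Usage sketch (illustrative, not part of the library): converting a Flax
# checkpoint into a PyTorch model with the helpers above. The checkpoint path
# is a hypothetical placeholder; BertForMaskedLM is a real transformers class.
#
#   from transformers import BertForMaskedLM
#
#   pt_model = BertForMaskedLM.from_pretrained("bert-base-uncased")
#   pt_model = load_flax_checkpoint_in_pytorch_model(pt_model, "/path/to/flax_model.msgpack")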
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
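# How the lazy module behaves (illustrative sketch; assumes sentencepiece is
# installed): the heavy import only happens on first attribute access, not at
# package import time.
#
#   import transformers.models.mluke as mluke
#
#   tok_cls = mluke.MLukeTokenizer  # triggers the real import here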
'''simple docstring'''
import socket
def main():
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(b"Hello server!")

    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print("Successfully received the file")
    sock.close()
    print("Connection closed")
if __name__ == "__main__":
main()
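# A minimal counterpart server sketch (an assumption, not part of this
# snippet): it accepts one connection, reads the greeting, streams a file back
# in 1024-byte chunks, and closes the socket, which is the EOF signal the
# client's recv loop above waits for. The filename is a placeholder.
#
#   import socket
#
#   def serve(filename: str, port: int = 12312) -> None:
#       server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#       server.bind((socket.gethostname(), port))
#       server.listen(1)
#       conn, _addr = server.accept()
#       print(conn.recv(1024))  # b'Hello server!'
#       with open(filename, "rb") as in_file:
#           while chunk := in_file.read(1024):
#               conn.send(chunk)
#       conn.close()  # closing signals EOF to the client
#       server.close()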
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''caidas/swin2sr-classicalsr-x2-64''': (
'''https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json'''
),
}
class Swin2SRConfig(PretrainedConfig):
    model_type = "swin2sr"

    attribute_map = {
        "hidden_size": "embed_dim",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=64,
        patch_size=1,
        num_channels=3,
        embed_dim=180,
        depths=[6, 6, 6, 6, 6, 6],
        num_heads=[6, 6, 6, 6, 6, 6],
        window_size=8,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        upscale=2,
        img_range=1.0,
        resi_connection="1conv",
        upsampler="pixelshuffle",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
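# Usage sketch: the zero-argument constructor reproduces the default
# architecture above, and `num_hidden_layers` resolves through `attribute_map`
# to `num_layers` (which is derived from `depths`).
#
#   config = Swin2SRConfig()
#   assert config.num_hidden_layers == len(config.depths) == 6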
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_roberta''': ['''ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RobertaConfig''', '''RobertaOnnxConfig'''],
'''tokenization_roberta''': ['''RobertaTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roberta_fast"] = ["RobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta"] = [
'''ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RobertaForCausalLM''',
'''RobertaForMaskedLM''',
'''RobertaForMultipleChoice''',
'''RobertaForQuestionAnswering''',
'''RobertaForSequenceClassification''',
'''RobertaForTokenClassification''',
'''RobertaModel''',
'''RobertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta"] = [
'''TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRobertaForCausalLM''',
'''TFRobertaForMaskedLM''',
'''TFRobertaForMultipleChoice''',
'''TFRobertaForQuestionAnswering''',
'''TFRobertaForSequenceClassification''',
'''TFRobertaForTokenClassification''',
'''TFRobertaMainLayer''',
'''TFRobertaModel''',
'''TFRobertaPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta"] = [
'''FlaxRobertaForCausalLM''',
'''FlaxRobertaForMaskedLM''',
'''FlaxRobertaForMultipleChoice''',
'''FlaxRobertaForQuestionAnswering''',
'''FlaxRobertaForSequenceClassification''',
'''FlaxRobertaForTokenClassification''',
'''FlaxRobertaModel''',
'''FlaxRobertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50265]
        self.assertEqual(list(output.shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""xlm-roberta-base""": """https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model""",
"""xlm-roberta-large""": """https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model""",
"""xlm-roberta-large-finetuned-conll02-dutch""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll02-spanish""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll03-english""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll03-german""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"""
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""xlm-roberta-base""": 512,
"""xlm-roberta-large""": 512,
"""xlm-roberta-large-finetuned-conll02-dutch""": 512,
"""xlm-roberta-large-finetuned-conll02-spanish""": 512,
"""xlm-roberta-large-finetuned-conll03-english""": 512,
"""xlm-roberta-large-finetuned-conll03-german""": 512,
}
class XLMRobertaTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + self.fairseq_offset
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + 1  # Add the <mask> token

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
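# Illustration of the fairseq alignment implemented above (sketch; the vocab
# file path and the piece "▁Hello" are hypothetical): a sentencepiece id `i`
# maps to fairseq id `i + 1`, while the first four ids are pinned to the
# <s>/<pad>/</s>/<unk> special tokens.
#
#   tokenizer = XLMRobertaTokenizer("sentencepiece.bpe.model")
#   assert tokenizer.convert_tokens_to_ids("<pad>") == 1
#   spm_id = tokenizer.sp_model.PieceToId("▁Hello")
#   assert tokenizer.convert_tokens_to_ids("▁Hello") == spm_id + 1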
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""huggingface/informer-tourism-monthly""": (
"""https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"""
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class InformerConfig(PretrainedConfig):
    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }
    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        # Informer arguments
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
@property
    def _number_of_features(self) -> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
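# Worked example of the feature bookkeeping above (values are illustrative):
# with input_size=1, the default lags_sequence [1..7] and 2 time features,
# _number_of_features = 0 (embeddings) + 0 (dynamic real) + 2 (time)
# + 0 (static real) + 2 (loc/scale) = 4, so feature_size = 1 * 7 + 4 = 11.
#
#   config = InformerConfig(prediction_length=24, num_time_features=2)
#   assert config.feature_size == 1 * len(config.lags_sequence) + config._number_of_features == 11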
SCREAMING_SNAKE_CASE__ : str = """Alexander Joslin"""
import operator as op
from .stack import Stack
def __lowercase ( snake_case ):
"""simple docstring"""
__magic_name__ :Optional[int] = {'''*''': op.mul, '''/''': op.truediv, '''+''': op.add, '''-''': op.sub}
__magic_name__ :Stack[int] = Stack()
__magic_name__ :Stack[str] = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(snake_case ) )
elif i in operators:
# RULE 2
operator_stack.push(snake_case )
elif i == ")":
# RULE 4
__magic_name__ :Optional[int] = operator_stack.peek()
operator_stack.pop()
__magic_name__ :List[str] = operand_stack.peek()
operand_stack.pop()
__magic_name__ :Optional[Any] = operand_stack.peek()
operand_stack.pop()
__magic_name__ :Optional[int] = operators[opr](snake_case, snake_case )
operand_stack.push(snake_case )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Any = """(5 + ((4 * 2) * (2 + 3)))"""
# answer = 45
print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")
'''simple docstring'''
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
logger = logging.get_logger(__name__)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BenchmarkArguments:
    models: List[str] = list_field(
        default=[],
        metadata={
            "help": (
                "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
                " of all available models"
            )
        },
    )

    batch_sizes: List[int] = list_field(
        default=[8], metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"}
    )

    sequence_lengths: List[int] = list_field(
        default=[8, 32, 128, 512],
        metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"},
    )

    inference: bool = field(
        default=True,
        metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."},
    )
    cuda: bool = field(
        default=True,
        metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."},
    )
    tpu: bool = field(
        default=True, metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."}
    )
    fp16: bool = field(default=False, metadata={"help": "Use FP16 to accelerate inference."})
    training: bool = field(default=False, metadata={"help": "Benchmark training of model"})
    verbose: bool = field(default=False, metadata={"help": "Verbose memory tracing"})
    speed: bool = field(
        default=True,
        metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."},
    )
    memory: bool = field(
        default=True,
        metadata={
            "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
        },
    )
    trace_memory_line_by_line: bool = field(default=False, metadata={"help": "Trace memory line by line"})
    save_to_csv: bool = field(default=False, metadata={"help": "Save result to a CSV file"})
    log_print: bool = field(default=False, metadata={"help": "Save all print statements in a log file"})
    env_print: bool = field(default=False, metadata={"help": "Whether to print environment information"})
    multi_process: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
                " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
                " for debugging / testing and on TPU."
            )
        },
    )
    inference_time_csv_file: str = field(
        default=f"inference_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv."},
    )
    inference_memory_csv_file: str = field(
        default=f"inference_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv."},
    )
    train_time_csv_file: str = field(
        default=f"train_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv for training."},
    )
    train_memory_csv_file: str = field(
        default=f"train_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv for training."},
    )
    env_info_csv_file: str = field(
        default=f"env_info_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving environment information."},
    )
    log_filename: str = field(
        default=f"log_{round(time())}.csv",
        metadata={"help": "Log filename used if print statements are saved in log."},
    )
    repeat: int = field(default=3, metadata={"help": "Times an experiment will be run."})
    only_pretrain_model: bool = field(
        default=False,
        metadata={
            "help": (
                "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
                " model weights."
            )
        },
    )
    def __post_init__(self):
        warnings.warn(
            f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
            " are deprecated in general and it is advised to use external Benchmarking libraries "
            " to benchmark Transformer models.",
            FutureWarning,
        )

    def to_json_string(self):
        """
        Serializes this instance to a JSON string.
        """
        return json.dumps(dataclasses.asdict(self), indent=2)

    @property
    def model_names(self) -> List[str]:
        if len(self.models) <= 0:
            raise ValueError(
                "Please make sure you provide at least one model name / model identifier, *e.g.* `--models"
                " bert-base-cased` or `args.models = ['bert-base-cased']."
            )
        return self.models

    @property
    def do_multi_processing(self):
        if not self.multi_process:
            return False
        elif self.is_tpu:
            logger.info("Multiprocessing is currently not possible on TPU.")
            return False
        else:
            return True
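# Sketch of turning these dataclass fields into CLI flags. HfArgumentParser is
# the real transformers API; the flag values below are illustrative only.
#
#   from transformers import HfArgumentParser
#
#   parser = HfArgumentParser(BenchmarkArguments)
#   benchmark_args = parser.parse_args_into_dataclasses(
#       args=["--models", "bert-base-cased", "--repeat", "1"]
#   )[0]
#   print(benchmark_args.model_names)  # ['bert-base-cased']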
"""simple docstring"""
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class FlaxDistilBertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            tie_weights_=True,
        )

        return config, input_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxDistilBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxDistilBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("distilbert-base-uncased")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]])
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
logger = logging.get_logger(__name__)
class BarkProcessor(ProcessorMixin):
    r"""
    Constructs a Bark processor which wraps a text tokenizer and optional Bark voice presets into a single processor.
    """

    tokenizer_class = "AutoTokenizer"
    attributes = ["tokenizer"]

    preset_shape = {
        "semantic_prompt": 1,
        "coarse_prompt": 2,
        "fine_prompt": 2,
    }

    def __init__(self, tokenizer, speaker_embeddings=None):
        super().__init__(tokenizer)

        self.speaker_embeddings = speaker_embeddings

    @classmethod
    def from_pretrained(
        cls, pretrained_processor_name_or_path, speaker_embeddings_dict_path="speaker_embeddings_path.json", **kwargs
    ):
        if speaker_embeddings_dict_path is not None:
            speaker_embeddings_path = get_file_from_repo(
                pretrained_processor_name_or_path,
                speaker_embeddings_dict_path,
                subfolder=kwargs.pop("subfolder", None),
                cache_dir=kwargs.pop("cache_dir", None),
                force_download=kwargs.pop("force_download", False),
                proxies=kwargs.pop("proxies", None),
                resume_download=kwargs.pop("resume_download", False),
                local_files_only=kwargs.pop("local_files_only", False),
                use_auth_token=kwargs.pop("use_auth_token", None),
                revision=kwargs.pop("revision", None),
            )
            if speaker_embeddings_path is None:
                logger.warning(
                    f"`{os.path.join(pretrained_processor_name_or_path, speaker_embeddings_dict_path)}` does not exists\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`."
                )
                speaker_embeddings = None
            else:
                with open(speaker_embeddings_path) as speaker_embeddings_json:
                    speaker_embeddings = json.load(speaker_embeddings_json)
        else:
            speaker_embeddings = None

        tokenizer = AutoTokenizer.from_pretrained(pretrained_processor_name_or_path, **kwargs)

        return cls(tokenizer=tokenizer, speaker_embeddings=speaker_embeddings)

    def save_pretrained(
        self,
        save_directory,
        speaker_embeddings_dict_path="speaker_embeddings_path.json",
        speaker_embeddings_directory="speaker_embeddings",
        push_to_hub=False,
        **kwargs,
    ):
        if self.speaker_embeddings is not None:
            os.makedirs(os.path.join(save_directory, speaker_embeddings_directory, "v2"), exist_ok=True)

            embeddings_dict = {}
            embeddings_dict["repo_or_path"] = save_directory

            for prompt_key in self.speaker_embeddings:
                if prompt_key != "repo_or_path":
                    voice_preset = self._load_voice_preset(prompt_key)

                    tmp_dict = {}
                    for key in self.speaker_embeddings[prompt_key]:
                        np.save(
                            os.path.join(
                                embeddings_dict["repo_or_path"], speaker_embeddings_directory, f"{prompt_key}_{key}"
                            ),
                            voice_preset[key],
                            allow_pickle=False,
                        )
                        tmp_dict[key] = os.path.join(speaker_embeddings_directory, f"{prompt_key}_{key}.npy")

                    embeddings_dict[prompt_key] = tmp_dict

            with open(os.path.join(save_directory, speaker_embeddings_dict_path), "w") as fp:
                json.dump(embeddings_dict, fp)

        super().save_pretrained(save_directory, push_to_hub, **kwargs)

    def _load_voice_preset(self, voice_preset=None, **kwargs):
        voice_preset_paths = self.speaker_embeddings[voice_preset]

        voice_preset_dict = {}
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset_paths:
                raise ValueError(
                    f"Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}]."
                )

            path = get_file_from_repo(
                self.speaker_embeddings.get("repo_or_path", "/"),
                voice_preset_paths[key],
                subfolder=kwargs.pop("subfolder", None),
                cache_dir=kwargs.pop("cache_dir", None),
                force_download=kwargs.pop("force_download", False),
                proxies=kwargs.pop("proxies", None),
                resume_download=kwargs.pop("resume_download", False),
                local_files_only=kwargs.pop("local_files_only", False),
                use_auth_token=kwargs.pop("use_auth_token", None),
                revision=kwargs.pop("revision", None),
            )
            if path is None:
                raise ValueError(
                    f'`{os.path.join(self.speaker_embeddings.get("repo_or_path", "/"), voice_preset_paths[key])}` does not exists\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings.'
                )

            voice_preset_dict[key] = np.load(path)

        return voice_preset_dict

    def _validate_voice_preset_dict(self, voice_preset=None):
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset:
                raise ValueError(f"Voice preset unrecognized, missing {key} as a key.")

            if not isinstance(voice_preset[key], np.ndarray):
                raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")

            if len(voice_preset[key].shape) != self.preset_shape[key]:
                raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")

    def __call__(
        self,
        text=None,
        voice_preset=None,
        return_tensors="pt",
        max_length=256,
        add_special_tokens=False,
        return_attention_mask=True,
        return_token_type_ids=False,
        **kwargs,
    ):
        if voice_preset is not None and not isinstance(voice_preset, dict):
            if (
                isinstance(voice_preset, str)
                and self.speaker_embeddings is not None
                and voice_preset in self.speaker_embeddings
            ):
                voice_preset = self._load_voice_preset(voice_preset)

            else:
                if isinstance(voice_preset, str) and not voice_preset.endswith(".npz"):
                    voice_preset = voice_preset + ".npz"

                voice_preset = np.load(voice_preset)

        if voice_preset is not None:
            self._validate_voice_preset_dict(voice_preset, **kwargs)
            voice_preset = BatchFeature(data=voice_preset, tensor_type=return_tensors)

        encoded_text = self.tokenizer(
            text,
            return_tensors=return_tensors,
            padding="max_length",
            max_length=max_length,
            return_attention_mask=return_attention_mask,
            return_token_type_ids=return_token_type_ids,
            add_special_tokens=add_special_tokens,
            **kwargs,
        )

        if voice_preset is not None:
            encoded_text["history_prompt"] = voice_preset

        return encoded_text
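# Usage sketch (the "suno/bark" checkpoint and "v2/en_speaker_6" preset are
# real Bark assets, but treat this as an illustrative example rather than a
# guaranteed API contract):
#
#   processor = BarkProcessor.from_pretrained("suno/bark")
#   encoded = processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")
#   # `encoded` holds the tokenizer outputs plus a "history_prompt" BatchFeature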
def prime_sieve_eratosthenes(num: int) -> list[int]:
    """
    Return all primes up to and including ``num`` using the sieve of Eratosthenes.

    >>> prime_sieve_eratosthenes(10)
    [2, 3, 5, 7]
    """
    if num <= 0:
        raise ValueError("Input must be a positive integer")

    primes = [True] * (num + 1)

    p = 2
    while p * p <= num:
        if primes[p]:
            # mark every multiple of p as composite
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1

    return [prime for prime in range(2, num + 1) if primes[prime]]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_num = int(input("Enter a positive integer: ").strip())
    print(prime_sieve_eratosthenes(user_num))
'''simple docstring'''
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/xprophetnet-large-wiki100-cased": (
"https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"
),
}
class XLMProphetNetConfig(PretrainedConfig):
    model_type = "xlm-prophetnet"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "num_encoder_attention_heads",
    }

    def __init__(
        self,
        activation_dropout: Optional[float] = 0.1,
        activation_function: Optional[Union[str, Callable]] = "gelu",
        vocab_size: Optional[int] = 30522,
        hidden_size: Optional[int] = 1024,
        encoder_ffn_dim: Optional[int] = 4096,
        num_encoder_layers: Optional[int] = 12,
        num_encoder_attention_heads: Optional[int] = 16,
        decoder_ffn_dim: Optional[int] = 4096,
        num_decoder_layers: Optional[int] = 12,
        num_decoder_attention_heads: Optional[int] = 16,
        attention_dropout: Optional[float] = 0.1,
        dropout: Optional[float] = 0.1,
        max_position_embeddings: Optional[int] = 512,
        init_std: Optional[float] = 0.02,
        is_encoder_decoder: Optional[bool] = True,
        add_cross_attention: Optional[bool] = True,
        decoder_start_token_id: Optional[int] = 0,
        ngram: Optional[int] = 2,
        num_buckets: Optional[int] = 32,
        relative_max_distance: Optional[int] = 128,
        disable_ngram_loss: Optional[bool] = False,
        eps: Optional[float] = 0.0,
        use_cache: Optional[bool] = True,
        pad_token_id: Optional[int] = 0,
        bos_token_id: Optional[int] = 1,
        eos_token_id: Optional[int] = 2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_ffn_dim = encoder_ffn_dim
        self.num_encoder_layers = num_encoder_layers
        self.num_encoder_attention_heads = num_encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_decoder_layers = num_decoder_layers
        self.num_decoder_attention_heads = num_decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std  # Normal(0, this parameter)
        self.activation_function = activation_function

        # parameters for xlmprophetnet
        self.ngram = ngram
        self.num_buckets = num_buckets
        self.relative_max_distance = relative_max_distance
        self.disable_ngram_loss = disable_ngram_loss
        self.eps = eps

        # 3 Types of Dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout

        self.use_cache = use_cache

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            add_cross_attention=add_cross_attention,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )

    @property
    def num_hidden_layers(self) -> int:
        return self.num_encoder_layers + self.num_decoder_layers

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"
            " `num_decoder_layers`."
        )
'''simple docstring'''
from collections.abc import Iterable
from typing import Generic, TypeVar
lowerCAmelCase_ : Optional[int] = TypeVar("_T")
class UpperCamelCase__ ( Generic[_T] ):
def __init__( self : Dict , lowerCamelCase : Iterable[_T] | None = None ):
'''simple docstring'''
a__ = list(iterable or [] )
a__ = []
def __len__( self : Optional[Any] ):
'''simple docstring'''
return len(self._stacka ) + len(self._stacka )
def __repr__( self : List[Any] ):
'''simple docstring'''
return F'''Queue({tuple(self._stacka[::-1] + self._stacka )})'''
def __a ( self : Union[str, Any] , lowerCamelCase : _T ):
'''simple docstring'''
self._stacka.append(lowerCamelCase )
def __a ( self : Dict ):
'''simple docstring'''
a__ = self._stacka.pop
a__ = self._stacka.append
if not self._stacka:
while self._stacka:
stacka_append(stacka_pop() )
if not self._stacka:
raise IndexError("Queue is empty" )
return self._stacka.pop()
if __name__ == "__main__":
from doctest import testmod
testmod()
| 489
| 1
|
"""simple docstring"""
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class OpenAIGPTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    def test_full_tokenizer(self):
        tokenizer = OpenAIGPTTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )

    def test_padding_different_model_input_name(self):
        pass
@require_ftfy
@require_spacy
@require_tokenizers
class OpenAIGPTTokenizationTestWithSpacy(OpenAIGPTTokenizationTest):
    """Test OpenAIGPTTokenizationTest with `ftfy` and `spacy`."""

    pass
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    StableDiffusionInstructPix2PixPipeline,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInstructPix2PixPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInstructPix2PixPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=8,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "image_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs
def _lowerCamelCase ( self ):
__a : List[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__a : Dict = self.get_dummy_components()
__a : Any = StableDiffusionInstructPixaPixPipeline(**_UpperCAmelCase )
__a : int = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__a : Union[str, Any] = self.get_dummy_inputs(_UpperCAmelCase )
__a : str = sd_pipe(**_UpperCAmelCase ).images
__a : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__a : Optional[int] = np.array([0.7_5_2_6, 0.3_7_5_0, 0.4_5_4_7, 0.6_1_1_7, 0.5_8_6_6, 0.5_0_1_6, 0.4_3_2_7, 0.5_6_4_2, 0.4_8_1_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def test_stable_diffusion_pix2pix_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_multiple_init_images(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = [inputs["prompt"]] * 2

        image = np.array(inputs["image"]).astype(np.float32) / 255.0
        image = torch.from_numpy(image).unsqueeze(0).to(device)
        image = image / 2 + 0.5
        image = image.permute(0, 3, 1, 2)
        inputs["image"] = image.repeat(2, 1, 1, 1)

        image = sd_pipe(**inputs).images
        image_slice = image[-1, -3:, -3:, -1]

        assert image.shape == (2, 32, 32, 3)
        expected_slice = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear"
        )
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        slice = [round(x, 4) for x in image_slice.flatten().tolist()]
        print(",".join([str(x) for x in slice]))

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
    def test_latents_input(self):
        components = self.get_dummy_components()
        pipe = StableDiffusionInstructPix2PixPipeline(**components)
        pipe.image_processor = VaeImageProcessor(do_resize=False, do_normalize=False)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        out = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="pt"))[0]

        vae = components["vae"]
        inputs = self.get_dummy_inputs_by_type(torch_device, input_image_type="pt")

        for image_param in self.image_latents_params:
            if image_param in inputs.keys():
                inputs[image_param] = vae.encode(inputs[image_param]).latent_dist.mode()

        out_latents_inputs = pipe(**inputs)[0]

        max_diff = np.abs(out - out_latents_inputs).max()
        self.assertLess(max_diff, 1e-4, "passing latents as image input generate different result from passing image")
@slow
@require_torch_gpu
class StableDiffusionInstructPix2PixPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        image = load_image(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg"
        )
        inputs = {
            "prompt": "turn him into a cyborg",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "image_guidance_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_pix2pix_default(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555])
        assert np.abs(expected_slice - image_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_k_lms(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301])
        assert np.abs(expected_slice - image_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_ddim(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753])
        assert np.abs(expected_slice - image_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False

        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        inputs = self.get_inputs()
        _ = pipe(**inputs)

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.2 GB is allocated
        assert mem_bytes < 2.2 * 10**9
    def test_stable_diffusion_pix2pix_pipeline_multiple_of_8(self):
        inputs = self.get_inputs()
        # resize to resolution that is divisible by 8 but not 16 or 32
        inputs["image"] = inputs["image"].resize((504, 504))

        model_id = "timbrooks/instruct-pix2pix"
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        output = pipe(**inputs)
        image = output.images[0]

        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 504, 3)
        expected_slice = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
"""simple docstring"""
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class BertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertTokenizer
    rust_tokenizer_class = BertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_chinese(self):
        tokenizer = BasicTokenizer()

        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])
    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])
    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )
    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )
    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )
    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )
    def test_basic_tokenizer_splits_on_punctuation(self):
        tokenizer = BasicTokenizer()
        text = "a\n'll !!to?'d of, can't."
        expected = ["a", "'", "ll", "!", "!", "to", "?", "'", "d", "of", ",", "can", "'", "t", "."]
        self.assertListEqual(tokenizer.tokenize(text), expected)
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))

        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))
    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))

        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))
    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))

        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
        )
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("bert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_2 + [102]
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )

                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
                )
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
"""simple docstring"""
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """Kwargs will be passed to calculate_rouge"""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
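
# Minimal usage sketch (the file names below are illustrative, not from this repo):
#
#   python rouge_cli.py predictions.txt references.txt --save_path rouge.json
#
# `fire` maps the positional arguments to pred_path/tgt_path and any extra flags
# to the remaining **kwargs of calculate_rouge.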
from __future__ import annotations
import os
from typing import Any
import requests
BASE_URL = "https://api.github.com"

# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + "/user"

# https://github.com/settings/tokens
USER_TOKEN = os.environ.get("USER_TOKEN", "")


def fetch_github_info(auth_token: str) -> dict[Any, Any]:
    """Fetch GitHub info of the authenticated user via the REST API."""
    headers = {
        "Authorization": f"token {auth_token}",
        "Accept": "application/vnd.github.v3+json",
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(f"{key}: {value}")
else:
raise ValueError('''\'USER_TOKEN\' field cannot be empty.''')
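
# Usage sketch: create a personal access token at https://github.com/settings/tokens,
# then run with the token in the environment (the token value is a placeholder):
#
#   USER_TOKEN=ghp_xxxxxxxxxxxx python fetch_github_info.py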
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    """Read contents of a compressed file as a filesystem with one file inside."""

    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz

    def __init__(
        self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs
    ):
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        return self.file.open().read()

    def _open(
        self,
        path: str,
        mode: str = "rb",
        block_size=None,
        autocommit=True,
        cache_options=None,
        **kwargs,
    ):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()
class Bz2FileSystem(BaseCompressedFileFileSystem):
    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedFileFileSystem):
    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class Lz4FileSystem(BaseCompressedFileFileSystem):
    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedFileFileSystem):
    protocol = "xz"
    compression = "xz"
    extension = ".xz"


class ZstdFileSystem(BaseCompressedFileFileSystem):
    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"
    def __init__(
        self,
        fo: str,
        mode: str = "rb",
        target_protocol: Optional[str] = None,
        target_options: Optional[dict] = None,
        block_size: int = DEFAULT_BLOCK_SIZE,
        **kwargs,
    ):
        super().__init__(
            fo=fo,
            mode=mode,
            target_protocol=target_protocol,
            target_options=target_options,
            block_size=block_size,
            **kwargs,
        )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
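
# Minimal usage sketch: once a subclass is registered with fsspec, e.g. via
# fsspec.register_implementation("gzip", GzipFileSystem, clobber=True), protocol
# chaining exposes the decompressed payload as a single-file archive. The local
# path below is illustrative:
#
#   import fsspec
#   with fsspec.open("gzip://file.txt::/tmp/file.txt.gz") as f:
#       data = f.read()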
import re
from filelock import FileLock
try:
import nltk
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    """This was added to get rougeLsum scores matching published rougeL scores for BART and PEGASUS."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
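
# Usage sketch:
#   add_newline_to_end_of_each_sentence("First sentence. Second sentence.")
#   -> "First sentence.\nSecond sentence."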
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
'''simple docstring'''
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10_001) -> int:
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int.") from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one.")
    primes: list[int] = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
        num += 1
    return primes[len(primes) - 1]
if __name__ == "__main__":
print(F"""{solution() = }""")
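
# Worked example: the first six primes are 2, 3, 5, 7, 11, 13, so solution(6) == 13.
# The trial division above only tests candidates of the form 6k +/- 1, since every
# prime greater than 3 falls in one of those two residue classes mod 6.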
'''simple docstring'''
def odd_even_transposition(arr: list) -> list:
    arr_size = len(arr)
    for _ in range(arr_size):
        for i in range(_ % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr


if __name__ == "__main__":
    arr = list(range(10, 0, -1))
    print(f"Original: {arr}. Sorted: {odd_even_transposition(arr)}")
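
# Example: odd_even_transposition([5, 4, 3, 2, 1]) returns [1, 2, 3, 4, 5].
# Alternating odd/even phases compare disjoint pairs, which is why this sort
# parallelizes well: every comparison within a phase can run concurrently.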
"""simple docstring"""
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def fill_mask(masked_input, model, tokenizer, topk=5):
    # Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
    return topk_filled_outputs


tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()

masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    config = DPTConfig()

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.num_labels = 150
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "patch_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")

    return name
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name):
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)

    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")

    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth

    # Assert logits
    expected_slice = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]])
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]])
    assert outputs.shape == torch.Size(expected_shape)
    assert (
        torch.allclose(outputs[0, 0, :3, :3], expected_slice, atol=1e-4)
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3], expected_slice)
    )

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model to hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt''',
type=str,
help='''URL of the original DPT checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
parser.add_argument(
'''--model_name''',
default='''dpt-large''',
type=str,
help='''Name of the model, in case you\'re pushing to the hub.''',
)
    args = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
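
# Usage sketch (the dump folder path is illustrative):
#
#   python convert_dpt_checkpoint.py \
#       --checkpoint_url https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt \
#       --pytorch_dump_folder_path ./dpt-large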
'''simple docstring'''
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
SAMPLE_PROCESSOR_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_VOCAB = get_tests_dir("fixtures/vocab.json")
SAMPLE_PROCESSOR_CONFIG_DIR = get_tests_dir("fixtures")
class AutoFeatureExtractorTest(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_processor_from_model_shortcut(self):
        processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(processor, Wav2Vec2Processor)
    def test_processor_from_local_directory_from_repo(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()
            processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            processor.save_pretrained(tmpdirname)

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)
    def test_processor_from_local_directory_from_extractor_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # copy relevant files
            copyfile(SAMPLE_PROCESSOR_CONFIG, os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME))
            copyfile(SAMPLE_VOCAB, os.path.join(tmpdirname, "vocab.json"))

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)
    def test_processor_from_feat_extr_processor_class(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            feature_extractor = Wav2Vec2FeatureExtractor()
            tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h")

            processor = Wav2Vec2Processor(feature_extractor, tokenizer)

            # save in new folder
            processor.save_pretrained(tmpdirname)

            # drop `processor_class` in tokenizer
            with open(os.path.join(tmpdirname, TOKENIZER_CONFIG_FILE), "r") as f:
                config_dict = json.load(f)
            config_dict.pop("processor_class")

            with open(os.path.join(tmpdirname, TOKENIZER_CONFIG_FILE), "w") as f:
                f.write(json.dumps(config_dict))

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)
    def test_processor_from_tokenizer_processor_class(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            feature_extractor = Wav2Vec2FeatureExtractor()
            tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h")

            processor = Wav2Vec2Processor(feature_extractor, tokenizer)

            # save in new folder
            processor.save_pretrained(tmpdirname)

            # drop `processor_class` in feature extractor
            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "r") as f:
                config_dict = json.load(f)
            config_dict.pop("processor_class")

            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "w") as f:
                f.write(json.dumps(config_dict))

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)
    def test_processor_from_local_directory_from_model_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config(processor_class="Wav2Vec2Processor")
            model_config.save_pretrained(tmpdirname)
            # copy relevant files
            copyfile(SAMPLE_VOCAB, os.path.join(tmpdirname, "vocab.json"))
            # create emtpy sample processor
            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "w") as f:
                f.write("{}")

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)
    def test_from_pretrained_dynamic_processor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=False
            )

        processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor", trust_remote_code=True)
        self.assertTrue(processor.special_attribute_present)
        self.assertEqual(processor.__class__.__name__, "NewProcessor")

        feature_extractor = processor.feature_extractor
        self.assertTrue(feature_extractor.special_attribute_present)
        self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")

        tokenizer = processor.tokenizer
        self.assertTrue(tokenizer.special_attribute_present)
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")

            # Test we can also load the slow version
            new_processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=True, use_fast=False
            )
            new_tokenizer = new_processor.tokenizer
            self.assertTrue(new_tokenizer.special_attribute_present)
            self.assertEqual(new_tokenizer.__class__.__name__, "NewTokenizer")
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
    def test_new_processor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            AutoProcessor.register(CustomConfig, CustomProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoProcessor.register(Wav2Vec2Config, Wav2Vec2Processor)

            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

            with tempfile.TemporaryDirectory() as tmp_dir:
                vocab_file = os.path.join(tmp_dir, "vocab.txt")
                with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                    vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
                tokenizer = CustomTokenizer(vocab_file)

            processor = CustomProcessor(feature_extractor, tokenizer)

            with tempfile.TemporaryDirectory() as tmp_dir:
                processor.save_pretrained(tmp_dir)
                new_processor = AutoProcessor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_processor, CustomProcessor)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
            if CustomConfig in PROCESSOR_MAPPING._extra_content:
                del PROCESSOR_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_processor_conflict(self):
        class NewFeatureExtractor(Wav2Vec2FeatureExtractor):
            special_attribute_present = False

        class NewTokenizer(BertTokenizer):
            special_attribute_present = False

        class NewProcessor(ProcessorMixin):
            feature_extractor_class = "AutoFeatureExtractor"
            tokenizer_class = "AutoTokenizer"
            special_attribute_present = False

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=NewTokenizer)
            AutoProcessor.register(CustomConfig, NewProcessor)
            # If remote code is not set, the default is to use local classes.
            processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor")
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertFalse(processor.special_attribute_present)
            self.assertFalse(processor.feature_extractor.special_attribute_present)
            self.assertFalse(processor.tokenizer.special_attribute_present)

            # If remote code is disabled, we load the local ones.
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=False
            )
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertFalse(processor.special_attribute_present)
            self.assertFalse(processor.feature_extractor.special_attribute_present)
            self.assertFalse(processor.tokenizer.special_attribute_present)

            # If remote is enabled, we load from the Hub.
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=True
            )
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertTrue(processor.special_attribute_present)
            self.assertTrue(processor.feature_extractor.special_attribute_present)
            self.assertTrue(processor.tokenizer.special_attribute_present)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
            if CustomConfig in PROCESSOR_MAPPING._extra_content:
                del PROCESSOR_MAPPING._extra_content[CustomConfig]
    def test_auto_processor_creates_tokenizer(self):
        processor = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-bert")
        self.assertEqual(processor.__class__.__name__, "BertTokenizerFast")

    def test_auto_processor_creates_image_processor(self):
        processor = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-convnext")
        self.assertEqual(processor.__class__.__name__, "ConvNextImageProcessor")
@is_staging_test
class ProcessorPushToHubTester(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-processor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-processor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-processor")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        processor = Wav2Vec2Processor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(tmp_dir, "test-processor"), push_to_hub=True, use_auth_token=self._token
            )

            new_processor = Wav2Vec2Processor.from_pretrained(f"{USER}/test-processor")
            for k, v in processor.feature_extractor.__dict__.items():
                self.assertEqual(v, getattr(new_processor.feature_extractor, k))
            self.assertDictEqual(new_processor.tokenizer.get_vocab(), processor.tokenizer.get_vocab())
    def test_push_to_hub_in_organization(self):
        processor = Wav2Vec2Processor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(tmp_dir, "test-processor-org"),
                push_to_hub=True,
                use_auth_token=self._token,
                organization="valid_org",
            )

            new_processor = Wav2Vec2Processor.from_pretrained("valid_org/test-processor-org")
            for k, v in processor.feature_extractor.__dict__.items():
                self.assertEqual(v, getattr(new_processor.feature_extractor, k))
            self.assertDictEqual(new_processor.tokenizer.get_vocab(), processor.tokenizer.get_vocab())
    def test_push_to_hub_dynamic_processor(self):
        CustomFeatureExtractor.register_for_auto_class()
        CustomTokenizer.register_for_auto_class()
        CustomProcessor.register_for_auto_class()

        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)

        processor = CustomProcessor(feature_extractor, tokenizer)

        with tempfile.TemporaryDirectory() as tmp_dir:
            create_repo(f"{USER}/test-dynamic-processor", token=self._token)
            repo = Repository(tmp_dir, clone_from=f"{USER}/test-dynamic-processor", token=self._token)
            processor.save_pretrained(tmp_dir)

            # This has added the proper auto_map field to the feature extractor config
            self.assertDictEqual(
                processor.feature_extractor.auto_map,
                {
                    "AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor",
                    "AutoProcessor": "custom_processing.CustomProcessor",
                },
            )

            # This has added the proper auto_map field to the tokenizer config
            with open(os.path.join(tmp_dir, "tokenizer_config.json")) as f:
                tokenizer_config = json.load(f)
            self.assertDictEqual(
                tokenizer_config["auto_map"],
                {
                    "AutoTokenizer": ["custom_tokenization.CustomTokenizer", None],
                    "AutoProcessor": "custom_processing.CustomProcessor",
                },
            )

            # The code has been copied from fixtures
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_feature_extraction.py")))
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_tokenization.py")))
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_processing.py")))

            repo.push_to_hub()

        new_processor = AutoProcessor.from_pretrained(f"{USER}/test-dynamic-processor", trust_remote_code=True)
        # Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
        self.assertEqual(new_processor.__class__.__name__, "CustomProcessor")
| 713
|
import socket
def main() -> None:
    sock = socket.socket(socket.AF_INET , socket.SOCK_STREAM )
    host = socket.gethostname()
    port = 12312
    sock.connect((host, port) )
    sock.send(b'Hello server!' )
    with open('Received_file' , 'wb' ) as out_file:
        print('File opened' )
        print('Receiving data...' )
        while True:
            data = sock.recv(1024 )
            if not data:
                break
            out_file.write(data )
    print('Successfully received the file' )
    sock.close()
    print('Connection closed' )
if __name__ == "__main__":
    main()
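# For context, a minimal matching server sketch (an assumption, not part of the
# original file): it accepts one connection, reads the greeting, streams a file
# back, and closes the socket to signal end-of-file to the client above.
#
#   server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   server.bind((socket.gethostname(), 12312))
#   server.listen(1)
#   conn, _ = server.accept()
#   print(conn.recv(1024))              # b'Hello server!'
#   with open('file_to_send', 'rb') as in_file:
#       conn.sendfile(in_file)
#   conn.close()
#   server.close()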
| 335
| 0
|
def sum_of_divisors( input_num ) -> int:
    '''simple docstring'''
    if not isinstance(input_num , int ):
        raise ValueError("Input must be an integer" )
    if input_num <= 0:
        raise ValueError("Input must be positive" )
    return sum(
        divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 )
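# Example (illustrative): the proper divisors of the perfect number 6 are 1, 2
# and 3, so sum_of_divisors(6) == 6; for a prime such as 7 the result is 1.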
if __name__ == "__main__":
import doctest
doctest.testmod()
| 321
|
from heapq import heappop, heappush
import numpy as np
def dijkstra( grid , source , destination , allow_diagonal , ) -> tuple[float | int, list[tuple[int, int]]]:
    '''simple docstring'''
    rows , cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]
    queue , visited = [(0, source)], set()
    matrix = np.full((rows, cols) , np.inf )
    matrix[source] = 0
    predecessors = np.empty((rows, cols) , dtype=object )
    predecessors[source] = None
    while queue:
        (dist , (x, y)) = heappop(queue )
        if (x, y) in visited:
            continue
        visited.add((x, y) )
        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y) )
                x , y = predecessors[x, y]
            path.append(source )  # add the source manually
            path.reverse()
            return matrix[destination], path
        for i in range(len(dx ) ):
            nx , ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue , (dist + 1, (nx, ny)) )
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)
    return np.inf, []
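# Example usage (illustrative; assumes 1 marks walkable cells, since the
# relaxation step above only expands cells where grid[nx][ny] == 1):
#
#   grid = np.array([[1, 1], [1, 1]])
#   dist, path = dijkstra(grid, (0, 0), (1, 1), allow_diagonal=False)
#   # dist == 2.0 and path == [(0, 0), (0, 1), (1, 1)]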
if __name__ == "__main__":
import doctest
doctest.testmod()
| 321
| 1
|
"""simple docstring"""
from __future__ import annotations
def make_matrix( row_size = 4 ) -> list[list[int]]:
    """simple docstring"""
    row_size = abs(row_size ) or 4
    return [[1 + x + y * row_size for x in range(row_size )] for y in range(row_size )]
def rotate_90( matrix ) -> list[list[int]]:
    """simple docstring"""
    return reverse_row(transpose(matrix ) )
    # OR.. transpose(reverse_column(matrix))
def rotate_180( matrix ) -> list[list[int]]:
    """simple docstring"""
    return reverse_row(reverse_column(matrix ) )
    # OR.. reverse_column(reverse_row(matrix))
def rotate_270( matrix ) -> list[list[int]]:
    """simple docstring"""
    return reverse_column(transpose(matrix ) )
    # OR.. transpose(reverse_row(matrix))
def transpose( matrix ) -> list[list[int]]:
    """simple docstring"""
    matrix = [list(x ) for x in zip(*matrix )]
    return matrix
def reverse_row( matrix ) -> list[list[int]]:
    """simple docstring"""
    matrix = matrix[::-1]
    return matrix
def reverse_column( matrix ) -> list[list[int]]:
    """simple docstring"""
    matrix = [x[::-1] for x in matrix]
    return matrix
def print_matrix( matrix ) -> None:
    """simple docstring"""
    for row in matrix:
        print(*row )
if __name__ == "__main__":
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_90(matrix))
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_180(matrix))
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_270(matrix))
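# Example (illustrative): for the default 4x4 matrix, row 0 is [1, 2, 3, 4], so
# a 90-degree counterclockwise turn moves the last column into the first row:
# the first row of rotate_90(make_matrix()) is [4, 8, 12, 16].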
| 51
|
"""simple docstring"""
class PrefixSum:
    '''simple docstring'''
    def __init__( self , array : list[int] ) -> None:
        len_array = len(array )
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1 , len_array ):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]
    def get_sum( self , start : int , end : int ) -> int:
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]
    def contains_sum( self , target_sum : int ) -> bool:
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item )
        return False
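# Example usage (illustrative):
#
#   ps = PrefixSum([1, 2, 3, 4])
#   ps.get_sum(1, 3)        # 2 + 3 + 4 == 9
#   ps.contains_sum(7)      # True: the contiguous slice [3, 4] sums to 7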
if __name__ == "__main__":
import doctest
doctest.testmod()
| 51
| 1
|
'''simple docstring'''
import math
def real_power( apparent_power , power_factor ) -> float:
    '''simple docstring'''
    if (
        not isinstance(power_factor , (int, float) )
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError('''power_factor must be a valid float value between -1 and 1.''' )
    return apparent_power * power_factor
def reactive_power( apparent_power , power_factor ) -> float:
    '''simple docstring'''
    if (
        not isinstance(power_factor , (int, float) )
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError('''power_factor must be a valid float value between -1 and 1.''' )
    return apparent_power * math.sqrt(1 - power_factor**2 )
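# Example (illustrative): an apparent power of 100 VA at power factor 0.8 gives
# real_power(100, 0.8) == 80.0 W and reactive_power(100, 0.8) == 60.0 VAR,
# since sqrt(1 - 0.8**2) == 0.6.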
if __name__ == "__main__":
import doctest
doctest.testmod()
| 638
|
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class A_ ( unittest.TestCase ):
    def analyze_directory( self , directory : Path , identifier : Union[str, None] = None , n_identifier : Union[List[str], None] = None , ignore_files : Union[str, List[str], None] = None , only_modules : bool = True , ):
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory , file))]
        if identifier is not None:
            files = [file for file in files if identifier in file]
        if n_identifier is not None:
            if isinstance(n_identifier , list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]
        ignore_files = ignore_files or []
        ignore_files.append('__init__.py')
        files = [file for file in files if file not in ignore_files]
        for file in files:
            # Open all files
            print('Testing' , file)
            if only_modules:
                module_identifier = file.split('.')[0]
                try:
                    module = getattr(transformers , module_identifier)
                    suite = doctest.DocTestSuite(module)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures) , 0)
                except AttributeError:
                    logger.info(F"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(str(Path('..') / directory / file) , optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed , 0)
    def test_modeling_files( self ):
        directory = Path('src/transformers')
        identifier = 'modeling'
        ignore_files = [
            'modeling_ctrl.py',
            'modeling_tf_ctrl.py',
        ]
        self.analyze_directory(directory , identifier=identifier , ignore_files=ignore_files)
    def test_tokenization_files( self ):
        directory = Path('src/transformers')
        identifier = 'tokenization'
        self.analyze_directory(directory , identifier=identifier)
    def test_configuration_files( self ):
        directory = Path('src/transformers')
        identifier = 'configuration'
        self.analyze_directory(directory , identifier=identifier)
    def test_remaining_files( self ):
        directory = Path('src/transformers')
        n_identifiers = ['configuration', 'modeling', 'tokenization']
        self.analyze_directory(directory , n_identifier=n_identifiers)
    def test_doc_files( self ):
        directory = Path('docs/source')
        ignore_files = ['favicon.ico']
        self.analyze_directory(directory , ignore_files=ignore_files , only_modules=False)
| 652
| 0
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
A_ = logging.get_logger(__name__)
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
    def __init__( self , *args , **kwargs ) -> None:
        '''simple docstring'''
        warnings.warn(
            'The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use BeitImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 384
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
A_ = {
"configuration_perceiver": ["PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PerceiverConfig", "PerceiverOnnxConfig"],
"tokenization_perceiver": ["PerceiverTokenizer"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = ["PerceiverFeatureExtractor"]
A_ = ["PerceiverImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST",
"PerceiverForImageClassificationConvProcessing",
"PerceiverForImageClassificationFourier",
"PerceiverForImageClassificationLearned",
"PerceiverForMaskedLM",
"PerceiverForMultimodalAutoencoding",
"PerceiverForOpticalFlow",
"PerceiverForSequenceClassification",
"PerceiverLayer",
"PerceiverModel",
"PerceiverPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
A_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 384
| 1
|
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 47
|
from __future__ import annotations
def check_polygon( nums : list[float] ):
    if len(nums ) < 2:
        raise ValueError('''Monogons and Digons are not polygons in the Euclidean space''' )
    if any(i <= 0 for i in nums ):
        raise ValueError('''All values must be greater than 0''' )
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1] )
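# Example (illustrative): check_polygon([3, 4, 5]) is True, while the degenerate
# side lengths [1, 1, 2] return False because the longest side (2) is not
# strictly smaller than the sum of the others (1 + 1).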
if __name__ == "__main__":
import doctest
doctest.testmod()
| 175
| 0
|
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCamelCase = logging.get_logger()
def convert_weight_and_push( hidden_sizes , name , config , save_directory , push_to_hub = True ):
    """simple docstring"""
    print(F'''Converting {name}...''' )
    with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model("""levit_128s""" , pretrained=True )
            else:
                from_model = timm.create_model("""levit_128""" , pretrained=True )
        if hidden_sizes == 192:
            from_model = timm.create_model("""levit_192""" , pretrained=True )
        if hidden_sizes == 256:
            from_model = timm.create_model("""levit_256""" , pretrained=True )
        if hidden_sizes == 384:
            from_model = timm.create_model("""levit_384""" , pretrained=True )
        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config ).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys() )
        new_keys = list(our_model.state_dict().keys() )
        print(len(og_keys ) , len(new_keys ) )
        for i in range(len(og_keys ) ):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights )
        x = torch.randn((2, 3, 224, 224) )
        out_original = from_model(x )
        out_ours = our_model(x ).logits
        assert torch.allclose(out_original , out_ours ), "The model logits don't match the original one."
    checkpoint_name = name
    print(checkpoint_name )
    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name )
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name )
        print(F'''Pushed {checkpoint_name}''' )
def convert_weights_and_push( save_directory , model_name = None , push_to_hub = True ):
    """simple docstring"""
    filename = """imagenet-1k-id2label.json"""
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = """huggingface/label-files"""
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(LevitConfig , num_labels=num_labels , id2label=id2label , label2id=label2id )
    names_to_hidden_sizes = {
        """levit-128S""": 128,
        """levit-128""": 128,
        """levit-192""": 192,
        """levit-256""": 256,
        """levit-384""": 384,
    }
    names_to_config = {
        """levit-128S""": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
        """levit-128""": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
        """levit-192""": ImageNetPreTrainedConfig(
            hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
        """levit-256""": ImageNetPreTrainedConfig(
            hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
        """levit-384""": ImageNetPreTrainedConfig(
            hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
    }
    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name] , model_name , names_to_config[model_name] , save_directory , push_to_hub )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name] , model_name , config , save_directory , push_to_hub )
    return config, expected_shape
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help="""The name of the model you wish to convert, it must be one of the supported Levit* architecture,""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""levit-dump-folder/""",
type=Path,
required=False,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
parser.add_argument(
"""--no-push_to_hub""",
dest="""push_to_hub""",
action="""store_false""",
help="""Do not push model and image processor to the hub""",
)
_UpperCamelCase = parser.parse_args()
_UpperCamelCase = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 74
|
"""simple docstring"""
def solution( max_base = 10 , max_power = 22 ):
    """simple docstring"""
    bases = range(1 , max_base )
    powers = range(1 , max_power )
    return sum(
        1 for power in powers for base in bases if len(str(base**power ) ) == power )
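# Example (illustrative): 9**5 == 59049 is a 5-digit 5th power, so it is counted;
# 10**n always has n + 1 digits, which is why bases stop at max_base - 1 == 9.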
if __name__ == "__main__":
print(F"""{solution(10, 22) = }""")
| 74
| 1
|
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
def a ( self : Union[str, Any] ):
__UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowercase )
__UpperCAmelCase = -1
__UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowercase )
__UpperCAmelCase = model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase )
__UpperCAmelCase = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
__UpperCAmelCase = TextStreamer(_lowercase )
model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase , streamer=_lowercase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
__UpperCAmelCase = cs.out[:-1]
self.assertEqual(_lowercase , _lowercase )
def a ( self : Optional[Any] ):
__UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowercase )
__UpperCAmelCase = -1
__UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowercase )
__UpperCAmelCase = model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase )
__UpperCAmelCase = tokenizer.decode(greedy_ids[0] )
__UpperCAmelCase = TextIteratorStreamer(_lowercase )
__UpperCAmelCase = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
__UpperCAmelCase = Thread(target=model.generate , kwargs=_lowercase )
thread.start()
__UpperCAmelCase = ''''''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(_lowercase , _lowercase )
def a ( self : str ):
__UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowercase )
__UpperCAmelCase = -1
__UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowercase )
__UpperCAmelCase = model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase )
__UpperCAmelCase = greedy_ids[:, input_ids.shape[1] :]
__UpperCAmelCase = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
__UpperCAmelCase = TextStreamer(_lowercase , skip_prompt=_lowercase )
model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase , streamer=_lowercase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
__UpperCAmelCase = cs.out[:-1]
self.assertEqual(_lowercase , _lowercase )
def a ( self : Tuple ):
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
__UpperCAmelCase = AutoTokenizer.from_pretrained('''distilgpt2''' )
__UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(_lowercase )
__UpperCAmelCase = -1
__UpperCAmelCase = torch.ones((1, 5) , device=_lowercase ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
__UpperCAmelCase = TextStreamer(_lowercase , skip_special_tokens=_lowercase )
model.generate(_lowercase , max_new_tokens=1 , do_sample=_lowercase , streamer=_lowercase )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
__UpperCAmelCase = cs.out[:-1] # Remove the final "\n"
__UpperCAmelCase = tokenizer(_lowercase , return_tensors='''pt''' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def a ( self : Tuple ):
__UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowercase )
__UpperCAmelCase = -1
__UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowercase )
__UpperCAmelCase = TextIteratorStreamer(_lowercase , timeout=0.001 )
__UpperCAmelCase = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
__UpperCAmelCase = Thread(target=model.generate , kwargs=_lowercase )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(_lowercase ):
__UpperCAmelCase = ''''''
for new_text in streamer:
streamer_text += new_text
| 49
|
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
_lowerCamelCase : Any = False
class lowercase ( unittest.TestCase):
'''simple docstring'''
pass
@nightly
@require_torch_gpu
class lowercase ( unittest.TestCase):
'''simple docstring'''
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
pipe.to(snake_case )
pipe.set_progress_bar_config(disable=snake_case )
SCREAMING_SNAKE_CASE : Dict = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Union[str, Any] = pipe.dual_guided(
prompt='first prompt' , image=snake_case , text_to_image_strength=0.75 , generator=snake_case , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(snake_case )
SCREAMING_SNAKE_CASE : Any = VersatileDiffusionPipeline.from_pretrained(snake_case , torch_dtype=torch.floataa )
pipe.to(snake_case )
pipe.set_progress_bar_config(disable=snake_case )
SCREAMING_SNAKE_CASE : List[str] = generator.manual_seed(0 )
SCREAMING_SNAKE_CASE : Union[str, Any] = pipe.dual_guided(
prompt='first prompt' , image=snake_case , text_to_image_strength=0.75 , generator=snake_case , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
pipe.to(snake_case )
pipe.set_progress_bar_config(disable=snake_case )
SCREAMING_SNAKE_CASE : Union[str, Any] = 'cyberpunk 2077'
SCREAMING_SNAKE_CASE : Optional[int] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
SCREAMING_SNAKE_CASE : List[Any] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = pipe.dual_guided(
prompt=snake_case , image=snake_case , text_to_image_strength=0.75 , generator=snake_case , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' , ).images
SCREAMING_SNAKE_CASE : List[Any] = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : Tuple = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
SCREAMING_SNAKE_CASE : Any = 'A painting of a squirrel eating a burger '
SCREAMING_SNAKE_CASE : Any = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Tuple = pipe.text_to_image(
prompt=snake_case , generator=snake_case , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' ).images
SCREAMING_SNAKE_CASE : Dict = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : List[Any] = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
SCREAMING_SNAKE_CASE : Optional[Any] = pipe.image_variation(snake_case , generator=snake_case , output_type='numpy' ).images
SCREAMING_SNAKE_CASE : Optional[int] = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : Optional[int] = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 352
| 0
|
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def get_args() -> Any:
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_ckpt" , type=str , default="microsoft/unixcoder-base-nine" )
    parser.add_argument("--num_epochs" , type=int , default=5 )
    parser.add_argument("--batch_size" , type=int , default=6 )
    parser.add_argument("--gradient_accumulation_steps" , type=int , default=1 )
    parser.add_argument("--freeze" , type=bool , default=True )
    parser.add_argument("--learning_rate" , type=float , default=5E-4 )
    parser.add_argument("--seed" , type=int , default=0 )
    parser.add_argument("--lr_scheduler_type" , type=str , default="cosine" )
    parser.add_argument("--num_warmup_steps" , type=int , default=10 )
    parser.add_argument("--weight_decay" , type=float , default=0.01 )
    parser.add_argument("--output_dir" , type=str , default="./results" )
    return parser.parse_args()
metric = load('accuracy')
def compute_metrics( eval_pred ) -> Any:
    '''simple docstring'''
    predictions , labels = eval_pred
    predictions = np.argmax(predictions , axis=1 )
    return metric.compute(predictions=predictions , references=labels )
class CustomCallback( TrainerCallback ):
    def __init__( self , trainer ) -> None:
        '''simple docstring'''
        super().__init__()
        self._trainer = trainer
    def on_epoch_end( self , args , state , control , **kwargs ) -> str:
        '''simple docstring'''
        if control.should_evaluate:
            control_copy = deepcopy(control )
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix="train" )
            return control_copy
def main() -> Optional[int]:
    '''simple docstring'''
    args = get_args()
    set_seed(args.seed )
    dataset = load_dataset("codeparrot/codecomplex" , split="train" )
    train_test = dataset.train_test_split(test_size=0.2 )
    test_validation = train_test["test"].train_test_split(test_size=0.5 )
    train_test_validation = DatasetDict(
        {
            "train": train_test["train"],
            "test": test_validation["train"],
            "valid": test_validation["test"],
        } )
    print("Loading tokenizer and model" )
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt )
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt , num_labels=7 )
    model.config.pad_token_id = model.config.eos_token_id
    if args.freeze:
        for param in model.roberta.parameters():
            param.requires_grad = False
    labels = ClassLabel(num_classes=7 , names=list(set(train_test_validation["train"]["complexity"] ) ) )
    def tokenize(example ):
        inputs = tokenizer(example["src"] , truncation=True , max_length=1024 )
        label = labels.str2int(example["complexity"] )
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }
    tokenized_datasets = train_test_validation.map(
        tokenize , batched=True , remove_columns=train_test_validation["train"].column_names , )
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer )
    training_args = TrainingArguments(
        output_dir=args.output_dir , learning_rate=args.learning_rate , lr_scheduler_type=args.lr_scheduler_type , evaluation_strategy="epoch" , save_strategy="epoch" , logging_strategy="epoch" , per_device_train_batch_size=args.batch_size , per_device_eval_batch_size=args.batch_size , num_train_epochs=args.num_epochs , gradient_accumulation_steps=args.gradient_accumulation_steps , weight_decay=0.01 , metric_for_best_model="accuracy" , run_name="complexity-java" , report_to="wandb" , )
    trainer = Trainer(
        model=model , args=training_args , train_dataset=tokenized_datasets["train"] , eval_dataset=tokenized_datasets["valid"] , tokenizer=tokenizer , data_collator=data_collator , compute_metrics=compute_metrics , )
    print("Training..." )
    trainer.add_callback(CustomCallback(trainer ) )
    trainer.train()
if __name__ == "__main__":
main()
| 473
|
def sum_of_digits( n ) -> int:
    '''simple docstring'''
    n = abs(n )
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res
def sum_of_digits_recursion( n ) -> int:
    '''simple docstring'''
    n = abs(n )
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10 )
def sum_of_digits_compact( n ) -> int:
    '''simple docstring'''
    return sum(int(c ) for c in str(abs(n ) ) )
def benchmark() -> None:
    '''simple docstring'''
    from collections.abc import Callable
    from timeit import timeit
    def benchmark_a_function(func , value ) -> None:
        call = F'''{func.__name__}({value})'''
        timing = timeit(F'''__main__.{call}''' , setup="import __main__" )
        print(F'''{call:56} = {func(value )} -- {timing:.4f} seconds''' )
    for value in (262144, 1125899906842624, 1267650600228229401496703205376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func , value )
        print()
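# Example (illustrative): all three variants agree, e.g.
# sum_of_digits(262144) == sum_of_digits_compact(262144) == 2+6+2+1+4+4 == 19.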
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 473
| 1
|
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'''The `inpainting.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionInpaintPipeline` instead.'''
)
| 146
|
from __future__ import annotations
import numpy as np
def lower_upper_decomposition( table : np.ndarray ) -> tuple[np.ndarray, np.ndarray]:
    rows , columns = np.shape(table )
    if rows != columns:
        msg = (
            '\'table\' has to be of square shaped array but got a '
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg )
    lower = np.zeros((rows, columns) )
    upper = np.zeros((rows, columns) )
    for i in range(columns ):
        for j in range(i ):
            total = sum(lower[i][k] * upper[k][j] for k in range(j ) )
            if upper[j][j] == 0:
                raise ArithmeticError('No LU decomposition exists' )
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i , columns ):
            total = sum(lower[i][k] * upper[k][j] for k in range(i ) )
            upper[i][j] = table[i][j] - total
    return lower, upper
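# Example usage (illustrative): a Doolittle decomposition keeps ones on the
# diagonal of `lower`, so for [[2, 1], [4, 3]]:
#
#   lower, upper = lower_upper_decomposition(np.array([[2, 1], [4, 3]]))
#   # lower == [[1, 0], [2, 1]] and upper == [[2, 1], [0, 1]]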
if __name__ == "__main__":
import doctest
doctest.testmod()
| 146
| 1
|
from ....configuration_utils import PretrainedConfig
from ....utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
'CarlCochet/trajectory-transformer-halfcheetah-medium-v2': (
'https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json'
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class _a ( UpperCamelCase__ ):
_lowercase : Optional[int] = '''trajectory_transformer'''
_lowercase : Any = ['''past_key_values''']
_lowercase : List[str] = {
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
    def __init__( self , vocab_size=100 , action_weight=5 , reward_weight=1 , value_weight=1 , block_size=249 , action_dim=6 , observation_dim=17 , transition_dim=25 , n_layer=4 , n_head=4 , n_embd=128 , embd_pdrop=0.1 , attn_pdrop=0.1 , resid_pdrop=0.1 , learning_rate=0.0006 , max_position_embeddings=512 , initializer_range=0.02 , layer_norm_eps=1E-12 , kaiming_initializer_range=1 , use_cache=True , pad_token_id=1 , bos_token_id=50_256 , eos_token_id=50_256 , **kwargs , ) -> Optional[Any]:
        """simple docstring"""
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
| 429
|
B64_CHARSET = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'
def base64_encode( data ):
    """simple docstring"""
    if not isinstance(data , bytes ):
        msg = f'a bytes-like object is required, not \'{data.__class__.__name__}\''
        raise TypeError(msg )
    binary_stream = ''.join(bin(byte )[2:].zfill(8 ) for byte in data )
    padding_needed = len(binary_stream ) % 6 != 0
    if padding_needed:
        # The padding that will be added later
        padding = b'=' * ((6 - len(binary_stream ) % 6) // 2)
        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream ) % 6)
    else:
        padding = b''
    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
            for index in range(0 , len(binary_stream ) , 6 ) ).encode()
        + padding
    )
def base64_decode( encoded_data ):
    """simple docstring"""
    if not isinstance(encoded_data , bytes ) and not isinstance(encoded_data , str ):
        msg = (
            'argument should be a bytes-like object or ASCII string, '
            f'not \'{encoded_data.__class__.__name__}\''
        )
        raise TypeError(msg )
    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data , bytes ):
        try:
            encoded_data = encoded_data.decode('utf-8' )
        except UnicodeDecodeError:
            raise ValueError('base64 encoded data should only contain ASCII characters' )
    padding = encoded_data.count('=' )
    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
    # Check the padding
    assert len(encoded_data ) % 4 == 0 and padding < 3, "Incorrect padding"
    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]
        binary_stream = ''.join(
            bin(B64_CHARSET.index(char ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
    else:
        binary_stream = ''.join(
            bin(B64_CHARSET.index(char ) )[2:].zfill(6 ) for char in encoded_data )
    data = [
        int(binary_stream[index : index + 8] , 2 )
        for index in range(0 , len(binary_stream ) , 8 )
    ]
    return bytes(data )
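# Example (illustrative round trip):
#
#   base64_encode(b'Python')   # b'UHl0aG9u'
#   base64_decode('UHl0aG9u')  # b'Python'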
if __name__ == "__main__":
import doctest
doctest.testmod()
| 429
| 1
|
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)
class __lowerCAmelCase ( __lowerCamelCase ):
"""simple docstring"""
A__ : List[Any] = "vision-encoder-decoder"
A__ : Dict = True
    def __init__( self , **kwargs ):
        """simple docstring"""
        super().__init__(**kwargs )
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                F'''A configuration of type {self.model_type} cannot be instantiated because '''
                F'''not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}''' )
        encoder_config = kwargs.pop('encoder' )
        encoder_model_type = encoder_config.pop('model_type' )
        decoder_config = kwargs.pop('decoder' )
        decoder_model_type = decoder_config.pop('model_type' )
        self.encoder = AutoConfig.for_model(encoder_model_type , **encoder_config )
        self.decoder = AutoConfig.for_model(decoder_model_type , **decoder_config )
        self.is_encoder_decoder = True
@classmethod
    def from_encoder_decoder_configs( cls , encoder_config : PretrainedConfig , decoder_config : PretrainedConfig , **kwargs ):
        """simple docstring"""
        logger.info('Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config' )
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **kwargs )
    def to_dict( self ):
        """simple docstring"""
        output = copy.deepcopy(self.__dict__ )
        output['encoder'] = self.encoder.to_dict()
        output['decoder'] = self.decoder.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
class __lowerCAmelCase ( __lowerCamelCase ):
"""simple docstring"""
A__ : Optional[int] = version.parse("1.11" )
    @property
    def inputs( self ):
        """simple docstring"""
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ] )
    @property
    def atol_for_validation( self ):
        """simple docstring"""
        return 1E-4
    @property
    def outputs( self ):
        """simple docstring"""
        return OrderedDict({'last_hidden_state': {0: 'batch', 1: 'encoder_sequence'}} )
class __lowerCAmelCase ( __lowerCamelCase ):
"""simple docstring"""
    @property
    def inputs( self ):
        """simple docstring"""
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
        common_inputs["attention_mask"] = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
        common_inputs["encoder_hidden_states"] = {0: """batch""", 1: """encoder_sequence"""}
        return common_inputs
    def generate_dummy_inputs( self , tokenizer : "PreTrainedTokenizerBase" , batch_size : int = -1 , seq_length : int = -1 , is_pair : bool = False , framework : Optional["TensorType"] = None , ):
        """simple docstring"""
        import torch
        common_inputs = OrderedDict()
        dummy_input = super().generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        batch , encoder_sequence = dummy_input["""input_ids"""].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop('input_ids' )
        common_inputs["attention_mask"] = dummy_input.pop('attention_mask' )
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape )
        return common_inputs
class __lowerCAmelCase ( __lowerCamelCase ):
"""simple docstring"""
    @property
    def inputs( self ):
        """simple docstring"""
        pass
    def get_encoder_config( self , encoder_config : PretrainedConfig ):
        """simple docstring"""
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config )
    def get_decoder_config( self , encoder_config : PretrainedConfig , decoder_config : PretrainedConfig , feature : str = "default" ):
        """simple docstring"""
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config , feature )
| 9
|
"""simple docstring"""
__UpperCAmelCase = frozenset(
[
'prompt',
'height',
'width',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
'cross_attention_kwargs',
]
)
__UpperCAmelCase = frozenset(['prompt', 'negative_prompt'])
__UpperCAmelCase = frozenset([])
__UpperCAmelCase = frozenset(['image'])
__UpperCAmelCase = frozenset(
[
'image',
'height',
'width',
'guidance_scale',
]
)
__UpperCAmelCase = frozenset(['image'])
__UpperCAmelCase = frozenset(
[
'prompt',
'image',
'height',
'width',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
]
)
__UpperCAmelCase = frozenset(['prompt', 'image', 'negative_prompt'])
__UpperCAmelCase = frozenset(
[
# Text guided image variation with an image mask
'prompt',
'image',
'mask_image',
'height',
'width',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
]
)
__UpperCAmelCase = frozenset(['prompt', 'image', 'mask_image', 'negative_prompt'])
__UpperCAmelCase = frozenset(
[
# image variation with an image mask
'image',
'mask_image',
'height',
'width',
'guidance_scale',
]
)
__UpperCAmelCase = frozenset(['image', 'mask_image'])
__UpperCAmelCase = frozenset(
[
'example_image',
'image',
'mask_image',
'height',
'width',
'guidance_scale',
]
)
__UpperCAmelCase = frozenset(['example_image', 'image', 'mask_image'])
__UpperCAmelCase = frozenset(['class_labels'])
__UpperCAmelCase = frozenset(['class_labels'])
__UpperCAmelCase = frozenset(['batch_size'])
__UpperCAmelCase = frozenset([])
__UpperCAmelCase = frozenset(['batch_size'])
__UpperCAmelCase = frozenset([])
__UpperCAmelCase = frozenset(
[
'prompt',
'audio_length_in_s',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
'cross_attention_kwargs',
]
)
__UpperCAmelCase = frozenset(['prompt', 'negative_prompt'])
__UpperCAmelCase = frozenset(['input_tokens'])
__UpperCAmelCase = frozenset(['input_tokens'])
| 65
| 0
|
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE_ (a__ , unittest.TestCase ):
'''simple docstring'''
_a = DiTPipeline
_a = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
_a = PipelineTesterMixin.required_optional_params - {
"latents",
"num_images_per_prompt",
"callback",
"callback_steps",
}
_a = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
_a = False
def _lowerCAmelCase ( self : int ) ->str:
torch.manual_seed(0 )
lowerCamelCase_ : Union[str, Any] = TransformeraDModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=__a , activation_fn="""gelu-approximate""" , num_embeds_ada_norm=1_000 , norm_type="""ada_norm_zero""" , norm_elementwise_affine=__a , )
lowerCamelCase_ : List[Any] = AutoencoderKL()
lowerCamelCase_ : int = DDIMScheduler()
lowerCamelCase_ : Any = {"""transformer""": transformer.eval(), """vae""": vae.eval(), """scheduler""": scheduler}
return components
def _lowerCAmelCase ( self : int , __a : int , __a : Any=0 ) ->int:
if str(__a ).startswith("""mps""" ):
lowerCamelCase_ : Optional[int] = torch.manual_seed(__a )
else:
lowerCamelCase_ : int = torch.Generator(device=__a ).manual_seed(__a )
lowerCamelCase_ : Dict = {
"""class_labels""": [1],
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def _lowerCAmelCase ( self : List[str] ) ->int:
lowerCamelCase_ : Optional[int] = """cpu"""
lowerCamelCase_ : List[str] = self.get_dummy_components()
lowerCamelCase_ : Dict = self.pipeline_class(**__a )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
lowerCamelCase_ : Dict = self.get_dummy_inputs(__a )
lowerCamelCase_ : Any = pipe(**__a ).images
lowerCamelCase_ : List[str] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
lowerCamelCase_ : Union[str, Any] = np.array([0.2_946, 0.6_601, 0.4_329, 0.3_296, 0.4_144, 0.5_319, 0.7_273, 0.5_013, 0.4_457] )
lowerCamelCase_ : int = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__a , 1e-3 )
def _lowerCAmelCase ( self : List[str] ) ->Union[str, Any]:
self._test_inference_batch_single_identical(relax_max_difference=__a , expected_max_diff=1e-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def _lowerCAmelCase ( self : Any ) ->Optional[int]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@require_torch_gpu
@slow
class SCREAMING_SNAKE_CASE_ (unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self : Any ) ->Tuple:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCAmelCase ( self : Optional[int] ) ->Any:
lowerCamelCase_ : List[str] = torch.manual_seed(0 )
lowerCamelCase_ : List[str] = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-256""" )
pipe.to("""cuda""" )
lowerCamelCase_ : Tuple = ["""vase""", """umbrella""", """white shark""", """white wolf"""]
lowerCamelCase_ : List[Any] = pipe.get_label_ids(__a )
lowerCamelCase_ : Dict = pipe(__a , generator=__a , num_inference_steps=40 , output_type="""np""" ).images
for word, image in zip(__a , __a ):
lowerCamelCase_ : Optional[Any] = load_numpy(
F'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy''' )
assert np.abs((expected_image - image).max() ) < 1e-2
def _lowerCAmelCase ( self : List[str] ) ->Tuple:
lowerCamelCase_ : int = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-512""" )
lowerCamelCase_ : Optional[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to("""cuda""" )
lowerCamelCase_ : Optional[int] = ["""vase""", """umbrella"""]
lowerCamelCase_ : Tuple = pipe.get_label_ids(__a )
lowerCamelCase_ : Dict = torch.manual_seed(0 )
lowerCamelCase_ : Dict = pipe(__a , generator=__a , num_inference_steps=25 , output_type="""np""" ).images
for word, image in zip(__a , __a ):
lowerCamelCase_ : int = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
F'''/dit/{word}_512.npy''' )
assert np.abs((expected_image - image).max() ) < 1e-1
| 171
|
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--original_config_file',
type=str,
required=True,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--image_size',
default=512,
type=int,
help=(
'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
    def parse_bool( string : str ) -> bool:
        if string == "True":
            return True
        elif string == "False":
            return False
        else:
            raise ValueError(f'''could not parse string as bool {string}''' )
parser.add_argument(
'--use_linear_projection', help='Override for use linear projection', required=False, type=parse_bool
)
parser.add_argument('--cross_attention_dim', help='Override for cross attention_dim', required=False, type=int)
    args = parser.parse_args()
    controlnet = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 171
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
__A : Optional[Any] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Optional[int] = ["MLukeTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
__A : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 27
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
a_ = logging.get_logger(__name__)
a_ = {
'''microsoft/swin-tiny-patch4-window7-224''': (
'''https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'''
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class __lowercase ( _UpperCAmelCase , _UpperCAmelCase):
"""simple docstring"""
_A : List[Any] = """swin"""
_A : Any = {
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
    def __init__(self , image_size=2_24 , patch_size=4 , num_channels=3 , embed_dim=96 , depths=[2, 2, 6, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , mlp_ratio=4.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , initializer_range=0.02 , layer_norm_eps=1e-5 , encoder_stride=32 , out_features=None , out_indices=None , **kwargs , ):
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
        self.stage_names = ["""stem"""] + [f'stage{idx}' for idx in range(1 , len(depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : str = version.parse("""1.11""")
    @property
    def inputs(self ):
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
            ] )
    @property
    def atol_for_validation(self ):
        return 1e-4
| 480
| 0
|
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class ZeroShotClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }

    def get_test_pipeline(self, model, tokenizer, processor):
        classifier = ZeroShotClassificationPipeline(
            model=model, tokenizer=tokenizer, candidate_labels=["politics", "health"]
        )
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
    def run_pipeline_test(self, classifier, _):
        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics")
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # No kwarg
        outputs = classifier("Who are you voting for in 2020?", ["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics, public health")
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
        )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics", "public health"])
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
        )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier(
            "Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="This text is about {}"
        )
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # https://github.com/huggingface/transformers/issues/13846
        outputs = classifier(["I am happy"], ["positive", "negative"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(1)
            ],
        )
        outputs = classifier(["I am happy", "I am sad"], ["positive", "negative"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(2)
            ],
        )

        with self.assertRaises(ValueError):
            classifier("", candidate_labels="politics")
        with self.assertRaises(TypeError):
            classifier(None, candidate_labels="politics")
        with self.assertRaises(ValueError):
            classifier("Who are you voting for in 2020?", candidate_labels="")
        with self.assertRaises(TypeError):
            classifier("Who are you voting for in 2020?", candidate_labels=None)
        with self.assertRaises(ValueError):
            classifier(
                "Who are you voting for in 2020?",
                candidate_labels="politics",
                hypothesis_template="Not formatting template",
            )
        with self.assertRaises(AttributeError):
            classifier(
                "Who are you voting for in 2020?",
                candidate_labels="politics",
                hypothesis_template=None,
            )

        self.run_entailment_id(classifier)
    def run_entailment_id(self, zero_shot_classifier: Pipeline):
        config = zero_shot_classifier.model.config
        original_label2id = config.label2id
        original_entailment = zero_shot_classifier.entailment_id

        config.label2id = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, -1)

        config.label2id = {"entailment": 0, "neutral": 1, "contradiction": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 0, "NON-ENTAIL": 1}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
        self.assertEqual(zero_shot_classifier.entailment_id, 2)

        config.label2id = original_label2id
        self.assertEqual(original_entailment, zero_shot_classifier.entailment_id)
    @require_torch
    def test_truncation(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification",
            model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
            framework="pt",
        )
        # There was a regression in 4.10 for this
        # Adding a test so we don't make the mistake again.
        # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
        zero_shot_classifier(
            "Who are you voting for in 2020?" * 100, candidate_labels=["politics", "public health", "science"]
        )
    @require_torch
    def test_small_model_pt(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification",
            model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
            framework="pt",
        )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.333, 0.333, 0.333],
            },
        )
    @require_tf
    def test_small_model_tf(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification",
            model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
            framework="tf",
        )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.333, 0.333, 0.333],
            },
        )
    @slow
    @require_torch
    def test_large_model_pt(self):
        zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="pt")
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["politics", "public health", "science"],
                "scores": [0.976, 0.015, 0.009],
            },
        )
        outputs = zero_shot_classifier(
            "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
            " in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
            " through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
            " solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
            " machine translation tasks show these models to be superior in quality while being more parallelizable"
            " and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
            " English-to-German translation task, improving over the existing best results, including ensembles by"
            " over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
            " single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
            " fraction of the training costs of the best models from the literature. We show that the Transformer"
            " generalizes well to other tasks by applying it successfully to English constituency parsing both with"
            " large and limited training data.",
            candidate_labels=["machine learning", "statistics", "translation", "vision"],
            multi_label=True,
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": (
                    "The dominant sequence transduction models are based on complex recurrent or convolutional neural"
                    " networks in an encoder-decoder configuration. The best performing models also connect the"
                    " encoder and decoder through an attention mechanism. We propose a new simple network"
                    " architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
                    " and convolutions entirely. Experiments on two machine translation tasks show these models to be"
                    " superior in quality while being more parallelizable and requiring significantly less time to"
                    " train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
                    " improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
                    " English-to-French translation task, our model establishes a new single-model state-of-the-art"
                    " BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
                    " costs of the best models from the literature. We show that the Transformer generalizes well to"
                    " other tasks by applying it successfully to English constituency parsing both with large and"
                    " limited training data."
                ),
                "labels": ["translation", "machine learning", "vision", "statistics"],
                "scores": [0.817, 0.713, 0.018, 0.018],
            },
        )
    @slow
    @require_tf
    def test_large_model_tf(self):
        zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="tf")
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["politics", "public health", "science"],
                "scores": [0.976, 0.015, 0.009],
            },
        )
        outputs = zero_shot_classifier(
            "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
            " in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
            " through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
            " solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
            " machine translation tasks show these models to be superior in quality while being more parallelizable"
            " and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
            " English-to-German translation task, improving over the existing best results, including ensembles by"
            " over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
            " single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
            " fraction of the training costs of the best models from the literature. We show that the Transformer"
            " generalizes well to other tasks by applying it successfully to English constituency parsing both with"
            " large and limited training data.",
            candidate_labels=["machine learning", "statistics", "translation", "vision"],
            multi_label=True,
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": (
                    "The dominant sequence transduction models are based on complex recurrent or convolutional neural"
                    " networks in an encoder-decoder configuration. The best performing models also connect the"
                    " encoder and decoder through an attention mechanism. We propose a new simple network"
                    " architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
                    " and convolutions entirely. Experiments on two machine translation tasks show these models to be"
                    " superior in quality while being more parallelizable and requiring significantly less time to"
                    " train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
                    " improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
                    " English-to-French translation task, our model establishes a new single-model state-of-the-art"
                    " BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
                    " costs of the best models from the literature. We show that the Transformer generalizes well to"
                    " other tasks by applying it successfully to English constituency parsing both with large and"
                    " limited training data."
                ),
                "labels": ["translation", "machine learning", "vision", "statistics"],
                "scores": [0.817, 0.713, 0.018, 0.018],
            },
        )
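# Editor's usage sketch (not part of the test file; the model name is real, the
# inputs are illustrative). Zero-shot classification scores candidate labels
# against the premise with an NLI model; without multi_label the scores are
# normalized to sum to 1 across labels.
if __name__ == "__main__":
    classifier = pipeline("zero-shot-classification", model="roberta-large-mnli")
    result = classifier(
        "Who are you voting for in 2020?",
        candidate_labels=["politics", "public health", "science"],
    )
    print(result["labels"][0], result["scores"][0])  # labels are sorted by score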
"""simple docstring"""
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
EQUATORIAL_RADIUS = 6378137


def lamberts_ellipsoidal_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """Approximate the distance in meters between two points on the Earth modeled
    as an oblate spheroid, using Lambert's formula with a haversine central angle."""
    # Flattening of the ellipsoid
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))
    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS
    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2
    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_denominator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_denominator)
    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)
    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
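    # Editor's worked example (coordinates are illustrative): San Francisco
    # (37.774856, -122.424227) to Yosemite (37.864742, -119.537521) comes out
    # around 254 km; a plain haversine estimate differs slightly because it
    # treats the Earth as a sphere rather than an oblate spheroid.
    print(lamberts_ellipsoidal_distance(37.774856, -122.424227, 37.864742, -119.537521))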
from math import pi
def arc_length(angle: int, radius: int) -> float:
    """Return the length of a circular arc for a central angle given in degrees."""
    return 2 * pi * radius * (angle / 360)


if __name__ == "__main__":
    print(arc_length(90, 10))
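    # Editor's note: 90 degrees on a radius-10 circle is a quarter circumference,
    # 2 * pi * 10 / 4 = 5 * pi, approximately 15.71.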
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
logger = datasets.utils.logging.get_logger(__name__)
class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """Builder Config for AudioFolder."""

    drop_labels: bool = None
    drop_metadata: bool = None


class AudioFolder(folder_based_builder.FolderBasedBuilder):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")
AUDIO_EXTENSIONS = [
'.aiff',
'.au',
'.avr',
'.caf',
'.flac',
'.htk',
'.svx',
'.mat4',
'.mat5',
'.mpc2k',
'.ogg',
'.paf',
'.pvf',
'.raw',
'.rf64',
'.sd2',
'.sds',
'.ircam',
'.voc',
'.w64',
'.wav',
'.nist',
'.wavex',
'.wve',
'.xi',
'.mp3',
'.opus',
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
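# Editor's usage sketch (the data_dir path is an assumption): AudioFolder backs
# the packaged "audiofolder" loader, which infers class labels from directory names.
if __name__ == "__main__":
    from datasets import load_dataset

    ds = load_dataset("audiofolder", data_dir="/path/to/audio/folder")
    print(ds["train"][0]["audio"])  # decoded dict: path, array, sampling_rate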
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0

        model = self.dummy_model()
        sample = self.dummy_sample_deter

        scheduler.set_timesteps(num_inference_steps)
        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample
        return sample
    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True, prediction_type=prediction_type, sample_max_value=threshold,
                )

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3
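# Editor's sketch (illustrative, not from the original file): the batched API
# exercised above denoises many (sample, timestep) pairs in a single call,
# which parallel samplers exploit. Shapes here are arbitrary assumptions.
if __name__ == "__main__":
    scheduler = DDIMParallelScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(10)
    sample = torch.randn(3, 3, 8, 8)       # three samples stacked as a batch
    timesteps = scheduler.timesteps[:3]    # one timestep per sample
    noise_pred = torch.randn_like(sample)  # stand-in for a model's output
    prev = scheduler.batch_step_no_noise(noise_pred, timesteps, sample, 0.0)
    print(prev.shape)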
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class FlaxBeitModelTester(unittest.TestCase):
    def __init__(self, parent, vocab_size=100, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3):
        self.parent = parent
        self.vocab_size = vocab_size
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = BeitConfig(vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range)

        return config, pixel_values, labels
    def create_and_check_model(self, config, pixel_values, labels):
        model = FlaxBeitModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, pixel_values, labels):
        model = FlaxBeitForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        num_labels = self.type_sequence_label_size
        model = FlaxBeitForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxBeitForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxBeitModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
    )

    def setUp(self) -> None:
        self.model_tester = FlaxBeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("microsoft/beit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@require_flax
class FlaxBeitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_masked_image_modeling_head(self):
        model = FlaxBeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k")

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="np").pixel_values

        # prepare bool_masked_pos
        bool_masked_pos = np.ones((1, 196), dtype=bool)

        # forward pass
        outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 196, 8192)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]
        )
        self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))

    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        # forward pass
        outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array([-1.2385, -1.0987, -1.0108])
        self.assertTrue(np.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        # forward pass
        outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 21841)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array([1.6881, -0.2787, 0.5901])
        self.assertTrue(np.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
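# Editor's inference sketch (mirrors the integration tests above; the checkpoint
# name is real, the image path is an assumption; requires flax + vision extras).
if __name__ == "__main__":
    from transformers import BeitImageProcessor

    processor = BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224")
    model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224")
    inputs = processor(images=Image.open("cat.png"), return_tensors="np")
    predicted_class = model(**inputs).logits.argmax(-1).item()
    print(model.config.id2label[predicted_class])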
'''simple docstring'''
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        _ = func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__
    return wrapper


def generate_examples(features: dict, num_examples=100, seq_shapes=None):
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data

        dummy_data.append((i, example))

    return dummy_data


def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)

        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))
    return dataset
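# Editor's usage sketch (feature spec and path are assumptions): generate a
# synthetic Arrow-backed dataset to benchmark read/write throughput.
if __name__ == "__main__":
    features = datasets.Features(
        {"text": datasets.Value("string"), "vec": datasets.Sequence(datasets.Value("float32"))}
    )
    ds = generate_example_dataset("/tmp/bench.arrow", features, num_examples=100, seq_shapes={"vec": (16,)})
    assert len(ds) == 100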
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_groupvit': [
'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'GroupViTConfig',
'GroupViTOnnxConfig',
'GroupViTTextConfig',
'GroupViTVisionConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_groupvit"] = [
'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GroupViTModel',
'GroupViTPreTrainedModel',
'GroupViTTextModel',
'GroupViTVisionModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_groupvit"] = [
'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFGroupViTModel',
'TFGroupViTPreTrainedModel',
'TFGroupViTTextModel',
'TFGroupViTVisionModel',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)

ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}
class OnnxRuntimeModel:
    def __init__(self, model=None, **kwargs):
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir", None)
        self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path, provider=None, sess_options=None):
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"
        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)

    def _save_pretrained(self, save_directory, file_name: Optional[str] = None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass

    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return
        os.makedirs(save_directory, exist_ok=True)
        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)

    @classmethod
    def _from_pretrained(cls, model_id, use_auth_token=None, revision=None, force_download=False, cache_dir=None, file_name=None, provider=None, sess_options=None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id, filename=model_file_name, use_auth_token=use_auth_token, revision=revision, cache_dir=cache_dir, force_download=force_download,
            )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)

    @classmethod
    def from_pretrained(cls, model_id, force_download=True, use_auth_token=None, cache_dir=None, **model_kwargs):
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")
        return cls._from_pretrained(
            model_id=model_id, revision=revision, cache_dir=cache_dir, force_download=force_download, use_auth_token=use_auth_token, **model_kwargs,
        )
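# Editor's usage sketch (the repo id and file name are assumptions, not a known
# checkpoint): load an ONNX model from the Hub and run it on NumPy inputs.
if __name__ == "__main__":
    model = OnnxRuntimeModel.from_pretrained(
        "some-org/some-onnx-model", file_name="model.onnx", provider="CPUExecutionProvider"
    )
    outputs = model(sample=np.zeros((1, 4, 64, 64), dtype=np.float32))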
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_electra': ['ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ElectraConfig', 'ElectraOnnxConfig'],
'tokenization_electra': ['ElectraTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_electra"] = [
'ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'ElectraForCausalLM',
'ElectraForMaskedLM',
'ElectraForMultipleChoice',
'ElectraForPreTraining',
'ElectraForQuestionAnswering',
'ElectraForSequenceClassification',
'ElectraForTokenClassification',
'ElectraModel',
'ElectraPreTrainedModel',
'load_tf_weights_in_electra',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_electra"] = [
'TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFElectraForMaskedLM',
'TFElectraForMultipleChoice',
'TFElectraForPreTraining',
'TFElectraForQuestionAnswering',
'TFElectraForSequenceClassification',
'TFElectraForTokenClassification',
'TFElectraModel',
'TFElectraPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_electra"] = [
'FlaxElectraForCausalLM',
'FlaxElectraForMaskedLM',
'FlaxElectraForMultipleChoice',
'FlaxElectraForPreTraining',
'FlaxElectraForQuestionAnswering',
'FlaxElectraForSequenceClassification',
'FlaxElectraForTokenClassification',
'FlaxElectraModel',
'FlaxElectraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"squeezebert/squeezebert-uncased": (
"https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"
),
"squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt",
"squeezebert/squeezebert-mnli-headless": (
"https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"squeezebert/squeezebert-uncased": (
"https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"
),
"squeezebert/squeezebert-mnli": (
"https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"
),
"squeezebert/squeezebert-mnli-headless": (
"https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"squeezebert/squeezebert-uncased": 512,
"squeezebert/squeezebert-mnli": 512,
"squeezebert/squeezebert-mnli-headless": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"squeezebert/squeezebert-uncased": {"do_lower_case": True},
"squeezebert/squeezebert-mnli": {"do_lower_case": True},
"squeezebert/squeezebert-mnli-headless": {"do_lower_case": True},
}
class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
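# Editor's note (illustrative token layout; actual ids depend on the vocab):
# the two helpers above produce the standard BERT-style packing:
#   single sequence: [CLS] A [SEP]          -> token_type_ids all 0
#   sequence pair:   [CLS] A [SEP] B [SEP]  -> 0s over A's span, 1s over B's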
from math import factorial
DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(10)}


def digit_factorial_sum(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("Parameter number must be int")

    if number < 0:
        raise ValueError("Parameter number must be greater than or equal to 0")

    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))
def solution(chain_length: int = 60, number_limit: int = 1000000) -> int:
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError("Parameters chain_length and number_limit must be int")

    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            "Parameters chain_length and number_limit must be greater than 0"
        )

    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths: dict = {}

    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0

        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater then the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)

        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]

        chain_sets_lengths[start_chain_element] = chain_set_length

        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1

    return chains_counter


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{solution()}")
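    # Editor's worked example (from the Project Euler 74 statement):
    # digit_factorial_sum(169) == 1! + 6! + 9! == 363601, and
    # 169 -> 363601 -> 1454 -> 169 loops, so the chain starting at 169 has
    # exactly 3 non-repeating terms.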
'''simple docstring'''
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class ProcessorGradientFlow:
    """
    Wraps the CLIP processor so that gradients can flow through the image
    preprocessing step (resize, center crop, normalize) instead of forcing a
    conversion to PIL images.
    """

    def __init__(self, device: str = "cpu", clip_model: str = "openai/clip-vit-large-patch14"):
        self.device = device
        self.tokenizer = CLIPTokenizerFast.from_pretrained(clip_model)
        self.image_mean = [0.48145466, 0.4578275, 0.40821073]
        self.image_std = [0.26862954, 0.26130258, 0.27577711]
        self.normalize = torchvision.transforms.Normalize(self.image_mean, self.image_std)
        self.resize = torchvision.transforms.Resize(224)
        self.center_crop = torchvision.transforms.CenterCrop(224)

    def preprocess_img(self, images):
        images = self.resize(images)
        images = self.center_crop(images)
        images = self.normalize(images)
        return images

    def __call__(self, text=None, images=None, **kwargs):
        encoding = self.tokenizer(text=text, **kwargs)
        encoding["pixel_values"] = self.preprocess_img(images)
        encoding = {key: value.to(self.device) for (key, value) in encoding.items()}
        return encoding
class VQGAN_CLIP(nn.Module):
    def __init__(self, iterations=10, lr=0.01, vqgan=None, vqgan_config=None, vqgan_checkpoint=None, clip=None, clip_preprocessor=None, device=None, log=False, save_vector=True, return_val="image", quantize=True, save_intermediate=False, show_intermediate=False, make_grid=False):
        super().__init__()
        self.latent = None
        self.device = device if device else get_device()
        if vqgan:
            self.vqgan = vqgan
        else:
            self.vqgan = load_vqgan(self.device, conf_path=vqgan_config, ckpt_path=vqgan_checkpoint)
        self.vqgan.eval()
        if clip:
            self.clip = clip
        else:
            self.clip = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
        self.clip.to(self.device)
        self.clip_preprocessor = ProcessorGradientFlow(device=self.device)

        self.iterations = iterations
        self.lr = lr
        self.log = log
        self.make_grid = make_grid
        self.return_val = return_val
        self.quantize = quantize
        self.latent_dim = self.vqgan.decoder.z_shape

    def make_animation(self, input_path=None, output_path=None, total_duration=5, extend_frames=True):
        images = []
        if output_path is None:
            output_path = "./animation.gif"
        if input_path is None:
            input_path = self.save_path
        paths = sorted(glob(input_path + "/*"))
        if not len(paths):
            raise ValueError(
                "No images found in save path, aborting (did you pass save_intermediate=True to the generate"
                " function?)"
            )
        if len(paths) == 1:
            print("Only one image found in save path, (did you pass save_intermediate=True to the generate function?)")
        frame_duration = total_duration / len(paths)
        durations = [frame_duration] * len(paths)
        if extend_frames:
            durations[0] = 1.5
            durations[-1] = 3
        for file_name in paths:
            if file_name.endswith(".png"):
                images.append(imageio.imread(file_name))
        imageio.mimsave(output_path, images, duration=durations)
        print(f"gif saved to {output_path}")

    def _get_latent(self, path=None, img=None):
        if not (path or img):
            raise ValueError("Input either path or tensor")
        if img is not None:
            raise NotImplementedError
        x = preprocess(Image.open(path), target_image_size=256).to(self.device)
        x_processed = preprocess_vqgan(x)
        z, *_ = self.vqgan.encode(x_processed)
        return z

    def _add_vector(self, transform_vector):
        """Add a vector transform to the base latent and decode the result."""
        base_latent = self.latent.detach().requires_grad_()
        trans_latent = base_latent + transform_vector
        if self.quantize:
            z_q, *_ = self.vqgan.quantize(trans_latent)
        else:
            z_q = trans_latent
        return self.vqgan.decode(z_q)

    def _get_clip_similarity(self, prompts, image, weights=None):
        clip_inputs = self.clip_preprocessor(text=prompts, images=image, return_tensors="pt", padding=True)
        clip_outputs = self.clip(**clip_inputs)
        similarity_logits = clip_outputs.logits_per_image
        if weights is not None:
            similarity_logits = similarity_logits * weights
        return similarity_logits.sum()

    def _get_CLIP_loss(self, pos_prompts, neg_prompts, image):
        pos_logits = self._get_clip_similarity(pos_prompts["prompts"], image, weights=(1 / pos_prompts["weights"]))
        if neg_prompts:
            neg_logits = self._get_clip_similarity(neg_prompts["prompts"], image, weights=neg_prompts["weights"])
        else:
            neg_logits = torch.tensor([1], device=self.device)
        loss = -torch.log(pos_logits) + torch.log(neg_logits)
        return loss
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : List[str] ,lowercase__ : Optional[Any] ,lowercase__ : List[str] ):
__lowercase = torch.randn_like(self.latent ,requires_grad=lowercase__ ,device=self.device )
__lowercase = torch.optim.Adam([vector] ,lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
__lowercase = self._add_vector(lowercase__ )
__lowercase = loop_post_process(lowercase__ )
__lowercase = self._get_CLIP_loss(lowercase__ ,lowercase__ ,lowercase__ )
print('''CLIP loss''' ,lowercase__ )
if self.log:
wandb.log({'''CLIP Loss''': clip_loss} )
clip_loss.backward(retain_graph=lowercase__ )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : Optional[int] ,lowercase__ : int ,lowercase__ : List[str] ):
wandb.init(reinit=lowercase__ ,project='''face-editor''' )
wandb.config.update({'''Positive Prompts''': positive_prompts} )
wandb.config.update({'''Negative Prompts''': negative_prompts} )
wandb.config.update({'''lr''': self.lr, '''iterations''': self.iterations} )
if image_path:
__lowercase = Image.open(lowercase__ )
__lowercase = image.resize((2_5_6, 2_5_6) )
wandb.log('''Original Image''' ,wandb.Image(lowercase__ ) )
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : int ):
if not prompts:
return []
__lowercase = []
__lowercase = []
if isinstance(lowercase__ ,lowercase__ ):
__lowercase = [prompt.strip() for prompt in prompts.split('''|''' )]
for prompt in prompts:
if isinstance(lowercase__ ,(tuple, list) ):
__lowercase = prompt[0]
__lowercase = float(prompt[1] )
elif ":" in prompt:
__lowercase , __lowercase = prompt.split(''':''' )
__lowercase = float(lowercase__ )
else:
__lowercase = prompt
__lowercase = 1.0
processed_prompts.append(lowercase__ )
weights.append(lowercase__ )
return {
"prompts": processed_prompts,
"weights": torch.tensor(lowercase__ ,device=self.device ),
}
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : Tuple ,lowercase__ : Optional[int]=None ,lowercase__ : Optional[int]=None ,lowercase__ : List[Any]=True ,lowercase__ : List[str]=False ,lowercase__ : int=True ,lowercase__ : Optional[int]=True ,lowercase__ : Dict=None ,):
if image_path:
__lowercase = self._get_latent(lowercase__ )
else:
__lowercase = torch.randn(self.latent_dim ,device=self.device )
if self.log:
self._init_logging(lowercase__ ,lowercase__ ,lowercase__ )
assert pos_prompts, "You must provide at least one positive prompt."
__lowercase = self.process_prompts(lowercase__ )
__lowercase = self.process_prompts(lowercase__ )
if save_final and save_path is None:
__lowercase = os.path.join('''./outputs/''' ,'''_'''.join(pos_prompts['''prompts'''] ) )
if not os.path.exists(lowercase__ ):
os.makedirs(lowercase__ )
else:
__lowercase = save_path + '''_''' + get_timestamp()
os.makedirs(lowercase__ )
__lowercase = save_path
__lowercase = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print('''Original Image''' )
show_pil(custom_to_pil(lowercase__ ) )
__lowercase = loop_post_process(lowercase__ )
for iter, transformed_img in enumerate(self._optimize_CLIP(lowercase__ ,lowercase__ ,lowercase__ ) ):
if show_intermediate:
show_pil(lowercase__ )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path ,F"iter_{iter:03d}.png" ) )
if self.log:
wandb.log({'''Image''': wandb.Image(lowercase__ )} )
if show_final:
show_pil(lowercase__ )
if save_final:
transformed_img.save(os.path.join(self.save_path ,F"iter_{iter:03d}_final.png" ) )
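# Usage sketch (illustrative only): the config/checkpoint paths and prompts below
# are assumptions chosen to show how the class is driven, not files shipped here.
#
#   editor = VQGAN_CLIP(vqgan_config="logs/vqgan/config.yaml", vqgan_checkpoint="logs/vqgan/last.ckpt", iterations=25)
#   editor.generate("a smiling face", neg_prompts="blurry photo:0.5", image_path="face.png", save_final=True)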
"""Koch snowflake: repeatedly replace each segment of a triangle with four segments forming a spike."""
from __future__ import annotations

import matplotlib.pyplot as plt  # type: ignore
import numpy

# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]


def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    """Apply the Koch construction `steps` times to the given polyline."""
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    """Replace each segment with four: the middle third is bent outward by 60 degrees."""
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    """Rotate a 2D vector counterclockwise by the given angle."""
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)


def plot(vectors: list[numpy.ndarray]) -> None:
    """Draw the polyline with equal axis scaling."""
    axes = plt.gca()
    axes.set_aspect("equal")
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
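# Sanity check (a minimal sketch): each iteration replaces every segment with four,
# so after n steps the polyline has 3 * 4**n + 1 vertices.
#
#   assert len(iterate(INITIAL_VECTORS, 3)) == 3 * 4**3 + 1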
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    "configuration_groupvit": [
        "GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "GroupViTConfig",
        "GroupViTOnnxConfig",
        "GroupViTTextConfig",
        "GroupViTVisionConfig",
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_groupvit"] = [
        "GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GroupViTModel",
        "GroupViTPreTrainedModel",
        "GroupViTTextModel",
        "GroupViTVisionModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_groupvit"] = [
        "TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFGroupViTModel",
        "TFGroupViTPreTrainedModel",
        "TFGroupViTTextModel",
        "TFGroupViTVisionModel",
    ]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image, w, h):
    """Convert a PIL image (or list of them) into a [-1, 1] tensor of shape (b, c, h, w)."""
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    """Spherical linear interpolation between v0 and v1; falls back to lerp when nearly parallel."""
    inputs_are_torch = False
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()

    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1

    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)
    return v2
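# Quick numeric check (a sketch, not part of the original file): slerp between
# orthogonal unit vectors at t=0.5 returns the normalized bisector.
#
#   v = slerp(0.5, np.array([1.0, 0.0]), np.array([0.0, 1.0]))
#   assert np.allclose(v, np.array([2**-0.5, 2**-0.5]))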
def spherical_dist_loss(x, y):
    """Squared geodesic distance between unit-normalized embeddings."""
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
def set_requires_grad(model, value):
    for param in model.parameters():
        param.requires_grad = value
class CLIPGuidedImagesMixingStableDiffusion(DiffusionPipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        clip_model: CLIPModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler],
        feature_extractor: CLIPFeatureExtractor,
        coca_model=None,
        coca_tokenizer=None,
        coca_transform=None,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            clip_model=clip_model,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
            coca_model=coca_model,
            coca_tokenizer=coca_tokenizer,
            coca_transform=coca_transform,
        )
        self.feature_extractor_size = (
            feature_extractor.size
            if isinstance(feature_extractor.size, int)
            else feature_extractor.size["shortest_edge"]
        )
        self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
        set_requires_grad(self.text_encoder, False)
        set_requires_grad(self.clip_model, False)

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    def freeze_vae(self):
        set_requires_grad(self.vae, False)

    def unfreeze_vae(self):
        set_requires_grad(self.vae, True)

    def freeze_unet(self):
        set_requires_grad(self.unet, False)

    def unfreeze_unet(self):
        set_requires_grad(self.unet, True)
    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start
    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, torch.Tensor):
            raise ValueError(f"`image` has to be of type `torch.Tensor` but is {type(image)}")

        image = image.to(device=device, dtype=dtype)

        if isinstance(generator, list):
            init_latents = [
                self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
            ]
            init_latents = torch.cat(init_latents, dim=0)
        else:
            init_latents = self.vae.encode(image).latent_dist.sample(generator)

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        init_latents = 0.18215 * init_latents
        init_latents = init_latents.repeat_interleave(batch_size, dim=0)

        noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents
    def get_image_description(self, image):
        transformed_image = self.coca_transform(image).unsqueeze(0)
        with torch.no_grad(), torch.cuda.amp.autocast():
            generated = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype))
        generated = self.coca_tokenizer.decode(generated[0].cpu().numpy())
        return generated.split("<end_of_text>")[0].replace("<start_of_text>", "").rstrip(" .,")
    def get_clip_image_embeddings(self, image, batch_size):
        clip_image_input = self.feature_extractor.preprocess(image)
        clip_image_features = torch.from_numpy(clip_image_input["pixel_values"][0]).unsqueeze(0).to(self.device).half()
        image_embeddings_clip = self.clip_model.get_image_features(clip_image_features)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
        image_embeddings_clip = image_embeddings_clip.repeat_interleave(batch_size, dim=0)
        return image_embeddings_clip
    @torch.enable_grad()
    def cond_fn(
        self,
        latents,
        timestep,
        index,
        text_embeddings,
        noise_pred_original,
        original_image_embeddings_clip,
        clip_guidance_scale,
    ):
        latents = latents.detach().requires_grad_()

        latent_model_input = self.scheduler.scale_model_input(latents, timestep)

        # predict the noise residual
        noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings).sample

        if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
            alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
            beta_prod_t = 1 - alpha_prod_t
            # compute predicted original sample from predicted noise also called
            # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
            pred_original_sample = (latents - beta_prod_t**0.5 * noise_pred) / alpha_prod_t**0.5

            fac = torch.sqrt(beta_prod_t)
            sample = pred_original_sample * (fac) + latents * (1 - fac)
        elif isinstance(self.scheduler, LMSDiscreteScheduler):
            sigma = self.scheduler.sigmas[index]
            sample = latents - sigma * noise_pred
        else:
            raise ValueError(f"scheduler type {type(self.scheduler)} not supported")

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        sample = 1 / 0.18215 * sample
        image = self.vae.decode(sample).sample
        image = (image / 2 + 0.5).clamp(0, 1)

        image = transforms.Resize(self.feature_extractor_size)(image)
        image = self.normalize(image).to(latents.dtype)

        image_embeddings_clip = self.clip_model.get_image_features(image)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)

        loss = spherical_dist_loss(image_embeddings_clip, original_image_embeddings_clip).mean() * clip_guidance_scale

        grads = -torch.autograd.grad(loss, latents)[0]

        if isinstance(self.scheduler, LMSDiscreteScheduler):
            latents = latents.detach() + grads * (sigma**2)
            noise_pred = noise_pred_original
        else:
            noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads
        return noise_pred, latents
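    # Note on cond_fn (descriptive comment, added for clarity): each call decodes the
    # predicted x_0 to pixel space, scores it against the target CLIP embedding with
    # spherical_dist_loss, and nudges the noise prediction with the negative gradient,
    # in the spirit of classifier guidance with CLIP standing in for the classifier.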
    @torch.no_grad()
    def __call__(
        self,
        style_image: Union[torch.FloatTensor, PIL.Image.Image],
        content_image: Union[torch.FloatTensor, PIL.Image.Image],
        style_prompt: Optional[str] = None,
        content_prompt: Optional[str] = None,
        height: Optional[int] = 512,
        width: Optional[int] = 512,
        noise_strength: float = 0.6,
        num_inference_steps: Optional[int] = 50,
        guidance_scale: Optional[float] = 7.5,
        batch_size: Optional[int] = 1,
        eta: float = 0.0,
        clip_guidance_scale: Optional[float] = 100,
        generator: Optional[torch.Generator] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        slerp_latent_style_strength: float = 0.8,
        slerp_prompt_style_strength: float = 0.1,
        slerp_clip_image_style_strength: float = 0.1,
    ):
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(f"You have passed {batch_size} batch_size, but only {len(generator)} generators.")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if isinstance(generator, torch.Generator) and batch_size > 1:
            generator = [generator] + [None] * (batch_size - 1)

        coca_is_none = [
            ("model", self.coca_model is None),
            ("tokenizer", self.coca_tokenizer is None),
            ("transform", self.coca_transform is None),
        ]
        coca_is_none = [x[0] for x in coca_is_none if x[1]]
        coca_is_none_str = ", ".join(coca_is_none)
        # generate prompts with coca model if prompt is None
        if content_prompt is None:
            if len(coca_is_none):
                raise ValueError(
                    f"Content prompt is None and CoCa [{coca_is_none_str}] is None."
                    f"Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline."
                )
            content_prompt = self.get_image_description(content_image)
        if style_prompt is None:
            if len(coca_is_none):
                raise ValueError(
                    f"Style prompt is None and CoCa [{coca_is_none_str}] is None."
                    f" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline."
                )
            style_prompt = self.get_image_description(style_image)

        # get prompt text embeddings for content and style
        content_text_input = self.tokenizer(
            content_prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        content_text_embeddings = self.text_encoder(content_text_input.input_ids.to(self.device))[0]

        style_text_input = self.tokenizer(
            style_prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        style_text_embeddings = self.text_encoder(style_text_input.input_ids.to(self.device))[0]

        text_embeddings = slerp(slerp_prompt_style_strength, content_text_embeddings, style_text_embeddings)

        # duplicate text embeddings for each generation per prompt
        text_embeddings = text_embeddings.repeat_interleave(batch_size, dim=0)

        # set timesteps
        accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
        extra_set_kwargs = {}
        if accepts_offset:
            extra_set_kwargs["offset"] = 1
        self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        self.scheduler.timesteps.to(self.device)

        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, noise_strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # Preprocess image
        preprocessed_content_image = preprocess(content_image, width, height)
        content_latents = self.prepare_latents(
            preprocessed_content_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator
        )

        preprocessed_style_image = preprocess(style_image, width, height)
        style_latents = self.prepare_latents(
            preprocessed_style_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator
        )

        latents = slerp(slerp_latent_style_strength, content_latents, style_latents)

        if clip_guidance_scale > 0:
            content_clip_image_embedding = self.get_clip_image_embeddings(content_image, batch_size)
            style_clip_image_embedding = self.get_clip_image_embeddings(style_image, batch_size)
            clip_image_embeddings = slerp(
                slerp_clip_image_style_strength, content_clip_image_embedding, style_clip_image_embedding
            )

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            max_length = content_text_input.input_ids.shape[-1]
            uncond_input = self.tokenizer([""], padding="max_length", max_length=max_length, return_tensors="pt")
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
            # duplicate unconditional embeddings for each generation per prompt
            uncond_embeddings = uncond_embeddings.repeat_interleave(batch_size, dim=0)
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not work reproducibly on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator

        with self.progress_bar(total=num_inference_steps):
            for i, t in enumerate(timesteps):
                # expand the latents if we are doing classifier free guidance
                latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

                # predict the noise residual
                noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

                # perform classifier free guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

                # perform clip guidance
                if clip_guidance_scale > 0:
                    text_embeddings_for_guidance = (
                        text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings
                    )
                    noise_pred, latents = self.cond_fn(
                        latents,
                        t,
                        i,
                        text_embeddings_for_guidance,
                        noise_pred,
                        clip_image_embeddings,
                        clip_guidance_scale,
                    )

                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, None)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
"""simple docstring"""
def A( snake_case_ ):
"""simple docstring"""
return 10 - x * x
def A( snake_case_ , snake_case_ ):
"""simple docstring"""
if equation(snake_case_ ) * equation(snake_case_ ) >= 0:
raise ValueError("Wrong space!" )
lowercase__: Union[str, Any] = a
while (b - a) >= 0.01:
# Find middle point
lowercase__: Tuple = (a + b) / 2
# Check if middle point is root
if equation(snake_case_ ) == 0.0:
break
# Decide the side to repeat the steps
if equation(snake_case_ ) * equation(snake_case_ ) < 0:
lowercase__: Tuple = c
else:
lowercase__: List[str] = c
return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
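# Expected behaviour: the roots of 10 - x*x are ±sqrt(10) ≈ ±3.16, and both calls
# bracket the positive root, so each print converges to roughly 3.16 (within the
# 0.01 interval-width tolerance used above).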
"""simple docstring"""
from math import factorial
def A( snake_case_ = 20 ):
"""simple docstring"""
lowercase__: Tuple = 2 * n # middle entry of odd rows starting at row 3 is the solution for n = 1,
# 2, 3,...
lowercase__: int = n // 2
return int(factorial(snake_case_ ) / (factorial(snake_case_ ) * factorial(n - k )) )
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
UpperCamelCase = int(sys.argv[1])
print(solution(n))
except ValueError:
print("""Invalid entry - please enter a number.""")
"""simple docstring"""
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
"""simple docstring"""
import gc
import threading
import time
import psutil
import torch
class UpperCAmelCase_ :
def __init__( self ) -> str:
__lowercase : List[Any] = psutil.Process()
__lowercase : Any = False
def _lowerCamelCase ( self ) -> Union[str, Any]:
__lowercase : Optional[Any] = -1
while True:
__lowercase : List[str] = max(self.process.memory_info().rss , self.cpu_memory_peak )
# can't sleep or will not catch the peak right (this comment is here on purpose)
if not self.peak_monitoring:
break
def _lowerCamelCase ( self ) -> Optional[Any]:
__lowercase : List[Any] = True
__lowercase : List[Any] = threading.Thread(target=self.peak_monitor )
__lowercase : Optional[int] = True
self.thread.start()
def _lowerCamelCase ( self ) -> Optional[Any]:
__lowercase : Union[str, Any] = False
self.thread.join()
return self.cpu_memory_peak
a_ = PeakCPUMemory()
def __UpperCAmelCase ( ):
# Time
__lowercase : Union[str, Any] = {'''time''': time.time()}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
__lowercase : List[Any] = psutil.Process().memory_info().rss
cpu_peak_tracker.start()
# GPU mem
for i in range(torch.cuda.device_count() ):
__lowercase : List[str] = torch.cuda.memory_allocated(__UpperCamelCase )
torch.cuda.reset_peak_memory_stats()
return measures
def __UpperCAmelCase ( __UpperCamelCase ):
# Time
__lowercase : List[Any] = {'''time''': time.time() - start_measures['''time''']}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
__lowercase : Union[str, Any] = (psutil.Process().memory_info().rss - start_measures['''cpu''']) / 2**20
__lowercase : Dict = (cpu_peak_tracker.stop() - start_measures['''cpu''']) / 2**20
# GPU mem
for i in range(torch.cuda.device_count() ):
__lowercase : str = (torch.cuda.memory_allocated(__UpperCamelCase ) - start_measures[str(__UpperCamelCase )]) / 2**20
__lowercase : Optional[int] = (torch.cuda.max_memory_allocated(__UpperCamelCase ) - start_measures[str(__UpperCamelCase )]) / 2**20
return measures
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
print(f"""{description}:""" )
print(f"""- Time: {measures["time"]:.2f}s""" )
for i in range(torch.cuda.device_count() ):
print(f"""- GPU {i} allocated: {measures[str(__UpperCamelCase )]:.2f}MiB""" )
__lowercase : Dict = measures[f"""{i}-peak"""]
print(f"""- GPU {i} peak: {peak:.2f}MiB""" )
print(f"""- CPU RAM allocated: {measures["cpu"]:.2f}MiB""" )
print(f"""- CPU RAM peak: {measures["cpu-peak"]:.2f}MiB""" )
"""Conversion of energy units via a table of factors relative to one joule."""

ENERGY_CONVERSION = {
    "joule": 1.0,
    "kilojoule": 1000,
    "megajoule": 1000000,
    "gigajoule": 1000000000,
    "wattsecond": 1.0,
    "watthour": 3600,
    "kilowatthour": 3600000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4186.8,
    "kilocalorie_nutr": 4186800.00,
    "electronvolt": 1.602176634e-19,
    "britishthermalunit_it": 1055.05585,
    "footpound": 1.355818,
}


def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    """Convert `value` between any two units listed in ENERGY_CONVERSION."""
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
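# Worked example: 1000 joules expressed in kilojoules.
#
#   assert energy_conversion("joule", "kilojoule", 1000) == 1.0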
"""0-1 knapsack solved three ways: memoized recursion, bottom-up tabulation,
and tabulation with reconstruction of one optimal subset."""


def mf_knapsack(i, wt, val, j):
    """Memory-function variant: only the subproblems actually reached are solved,
    using the global table `f` pre-filled with -1."""
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val = mf_knapsack(i - 1, wt, val, j)
        else:
            val = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = val
    return f[i][j]


def knapsack(w, wt, val, n):
    """Bottom-up tabulation; returns the optimal value and the full dp table."""
    dp = [[0] * (w + 1) for _ in range(n + 1)]

    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]

    return dp[n][w_], dp


def knapsack_with_example_solution(w: int, wt: list, val: list):
    """Solve the integer-weights knapsack problem and also return one optimal subset
    of item indices (1-based)."""
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            "Both the weights and values vectors must be either lists or tuples"
        )

    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)

    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)

    return optimal_val, example_optional_set


def _construct_solution(dp: list, wt: list, i: int, j: int, optimal_set: set):
    # for the current item i at a maximum weight j to be part of an optimal subset,
    # the optimal value at (i, j) must be greater than the optimal value at (i-1, j).
    # where i - 1 means considering only the previous items at the given maximum weight
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)


if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("optimal_value = ", optimal_solution)
    print("An optimal subset corresponding to the optimal value", optimal_subset)
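# Complexity note (added for clarity): the tabulated knapsack fills an
# (n+1) x (w+1) table, i.e. O(n*w) time and space, while the memory-function
# variant mf_knapsack only evaluates the subproblems it actually needs.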
"""simple docstring"""
import os
from collections.abc import Iterator
def __magic_name__ ( __snake_case : str = "." ) -> int:
for dir_path, dir_names, filenames in os.walk(lowercase__ ):
lowercase : Optional[Any] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(lowercase__ )[1] in (".py", ".ipynb"):
yield os.path.join(lowercase__ , lowercase__ ).lstrip("./" )
def __magic_name__ ( __snake_case : Optional[Any] ) -> Optional[Any]:
return f"""{i * " "}*""" if i else "\n##"
def __magic_name__ ( __snake_case : str , __snake_case : str ) -> List[Any]:
lowercase : Optional[int] = old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(lowercase__ ) or old_parts[i] != new_part) and new_part:
print(f"""{md_prefix(lowercase__ )} {new_part.replace("_" , " " ).title()}""" )
return new_path
def __magic_name__ ( __snake_case : str = "." ) -> Any:
lowercase : Tuple = ""
for filepath in sorted(good_file_paths(lowercase__ ) ):
lowercase , lowercase : Tuple = os.path.split(lowercase__ )
if filepath != old_path:
lowercase : int = print_path(lowercase__ , lowercase__ )
lowercase : Union[str, Any] = (filepath.count(os.sep ) + 1) if filepath else 0
lowercase : Any = f"""{filepath}/{filename}""".replace(" " , "%20" )
lowercase : Any = os.path.splitext(filename.replace("_" , " " ).title() )[0]
print(f"""{md_prefix(lowercase__ )} [{filename}]({url})""" )
if __name__ == "__main__":
print_directory_md(""".""")
"""simple docstring"""
from __future__ import annotations
def a_ ( lowercase__ :list[float] ):
if len(lowercase__ ) < 2:
raise ValueError("""Monogons and Digons are not polygons in the Euclidean space""" )
if any(i <= 0 for i in nums ):
raise ValueError("""All values must be greater than 0""" )
__lowerCamelCase = nums.copy()
copy_nums.sort()
return copy_nums[-1] < sum(copy_nums[:-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
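# Worked example: the largest side must be strictly shorter than the sum of the rest.
#
#   assert check_polygon([6, 10, 5]) is True   # 10 < 6 + 5
#   assert check_polygon([3, 7, 13, 2]) is False  # 13 >= 3 + 7 + 2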
"""simple docstring"""
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger = get_logger()

DEVICE_MAPPING: Optional[dict] = None
class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`."
            )
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f"Device with string identifier {self.device} not listed among the available "
                f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default "
                f"device: {str(jax.devices()[0])}."
            )
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs

    @staticmethod
    def _map_devices_to_str():
        import jax

        return {str(device): device for device in jax.devices()}

    def _consolidate(self, column):
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
            ):
                return jnp.stack(column, axis=0)
        return column

    def _tensorize(self, value):
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})

    def _recursive_tensorize(self, data_struct):
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
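# Usage sketch (hedged): in practice this formatter is usually selected through the
# `datasets` API rather than instantiated directly, e.g.
#
#   ds = ds.with_format("jax", device=str(jax.devices()[0]))
#
# after which indexing the dataset returns jax.numpy arrays on the chosen device.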
"""simple docstring"""
from __future__ import annotations
def __snake_case ( UpperCamelCase__ ) -> int:
"""simple docstring"""
if not nums:
return 0
A = nums[0]
A = 0
for num in nums[1:]:
A , A = (
max_excluding + num,
max(UpperCamelCase__ , UpperCamelCase__ ),
)
return max(UpperCamelCase__ , UpperCamelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
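# Worked example: for [1, 2, 4, 5, 9] the best non-adjacent picks are 1 + 4 + 9 = 14.
#
#   assert maximum_non_adjacent_sum([1, 2, 4, 5, 9]) == 14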
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union

import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn

from transformers import (
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForPreTraining,
    is_apex_available,
    trainer_utils,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import _compute_mask_indices


if is_apex_available():
    from apex import amp

if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
    _is_native_amp_available = True
    from torch.cuda.amp import autocast

logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config we are going to pretrain."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    verbose_logging: Optional[bool] = field(
        default=False,
        metadata={"help": "Whether to log verbose messages or not."},
    )
    max_gumbel_temperature: Optional[float] = field(
        default=2.0, metadata={"help": "Maximum temperature for gumbel softmax."}
    )
    min_gumbel_temperature: Optional[float] = field(
        default=0.5, metadata={"help": "Minimum temperature for gumbel softmax."}
    )
    gumbel_temperature_decay: Optional[float] = field(
        default=0.999995, metadata={"help": "Decay of gumbel temperature during training."}
    )


def configure_logger(model_args: ModelArguments, training_args: TrainingArguments):
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank):
        logging_level = logging.INFO
    logger.setLevel(logging_level)
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to use for pretraining."""

    dataset_name: str = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    validation_split_name: Optional[str] = field(
        default="validation",
        metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    speech_file_column: Optional[str] = field(
        default="file",
        metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    validation_split_percentage: Optional[int] = field(
        default=1,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={"help": "The number of processes to use for the preprocessing."}
    )
    max_duration_in_seconds: Optional[float] = field(
        default=20.0, metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"}
    )
@dataclass
class DataCollatorForWav2Vec2Pretraining:
    """Dynamically pad the received inputs and sample the masked time indices
    needed for self-supervised pretraining."""

    model: Wav2Vec2ForPreTraining
    feature_extractor: Wav2Vec2FeatureExtractor
    padding: Union[bool, str] = "longest"
    pad_to_multiple_of: Optional[int] = None
    max_length: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # reformat list to dict and set to pytorch format
        batch = self.feature_extractor.pad(
            features,
            max_length=self.max_length,
            padding=self.padding,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1])

        batch_size = batch["input_values"].shape[0]

        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to(
                torch.long
            )

            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device
            )

            # these two operations makes sure that all values
            # before the output lengths indices are attended to
            attention_mask[
                (torch.arange(attention_mask.shape[0], device=batch["input_values"].device), output_lengths - 1)
            ] = 1
            attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()

        # sample randomly masked indices
        batch["mask_time_indices"] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length),
            self.model.config.mask_time_prob,
            self.model.config.mask_time_length,
            attention_mask=attention_mask,
            min_masks=2,
        )

        return batch
class Wav2Vec2PreTrainer(Trainer):
    """Subclassed Trainer that decays the gumbel softmax temperature during training."""

    def __init__(self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs):
        super().__init__(*args, **kwargs)
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay

    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["mask_time_indices"]).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )

        return loss.detach()
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args, training_args)

    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)

    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]",
            cache_dir=model_args.cache_dir,
        )
    else:
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split="validation", cache_dir=model_args.cache_dir
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}",
            cache_dir=model_args.cache_dir,
        )

    # only normalized-inputs-training is supported
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=True
    )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        batch["speech"], _ = librosa.load(batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate)
        return batch

    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets["train"].column_names
    )

    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data: len(data["speech"]) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate)
    )

    def normalize(batch):
        return feature_extractor(batch["speech"], sampling_rate=feature_extractor.sampling_rate)

    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        load_from_cache_file=not data_args.overwrite_cache,
        remove_columns=vectorized_datasets["train"].column_names,
    )

    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = Wav2Vec2Config.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        gradient_checkpointing=training_args.gradient_checkpointing,
    )

    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
            " ``config.feat_extract_norm='layer'``"
        )

    model = Wav2Vec2ForPreTraining(config)

    data_collator = DataCollatorForWav2Vec2Pretraining(model=model, feature_extractor=feature_extractor)

    trainer = Wav2Vec2PreTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        train_dataset=vectorized_datasets["train"],
        eval_dataset=vectorized_datasets["validation"],
        tokenizer=feature_extractor,
        max_gumbel_temp=model_args.max_gumbel_temperature,
        min_gumbel_temp=model_args.min_gumbel_temperature,
        gumbel_temp_decay=model_args.gumbel_temperature_decay,
    )
    trainer.train()


if __name__ == "__main__":
    main()
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def get_configs(model):
    original_config = model.config

    encoder_config = DonutSwinConfig(
        image_size=original_config.input_size,
        patch_size=4,
        depths=original_config.encoder_layer,
        num_heads=[4, 8, 16, 32],
        window_size=original_config.window_size,
        embed_dim=128,
    )
    decoder_config = MBartConfig(
        is_decoder=True,
        is_encoder_decoder=False,
        add_cross_attention=True,
        decoder_layers=original_config.decoder_layer,
        max_position_embeddings=original_config.max_position_embeddings,
        vocab_size=len(
            model.decoder.tokenizer
        ),
        scale_embedding=True,
        add_final_layer_norm=True,
    )

    return encoder_config, decoder_config


def rename_key(name):
    if "encoder.model" in name:
        name = name.replace("encoder.model", "encoder")
    if "decoder.model" in name:
        name = name.replace("decoder.model", "decoder")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if name.startswith("encoder"):
        if "layers" in name:
            name = "encoder." + name
        if "attn.proj" in name:
            name = name.replace("attn.proj", "attention.output.dense")
        if "attn" in name and "mask" not in name:
            name = name.replace("attn", "attention.self")
        if "norm1" in name:
            name = name.replace("norm1", "layernorm_before")
        if "norm2" in name:
            name = name.replace("norm2", "layernorm_after")
        if "mlp.fc1" in name:
            name = name.replace("mlp.fc1", "intermediate.dense")
        if "mlp.fc2" in name:
            name = name.replace("mlp.fc2", "output.dense")

        if name == "encoder.norm.weight":
            name = "encoder.layernorm.weight"
        if name == "encoder.norm.bias":
            name = "encoder.layernorm.bias"

    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # fused qkv projections are split into separate query/key/value tensors
            key_split = key.split(".")
            layer_num = int(key_split[3])
            block_num = int(key_split[5])
            dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
            # HuggingFace implementation doesn't use attn_mask buffer
            # and model doesn't use final LayerNorms for the encoder
            pass
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_donut_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    # load original model
    original_model = DonutModel.from_pretrained(model_name).eval()
    # load HuggingFace model
    encoder_config, decoder_config = get_configs(original_model)
    encoder = DonutSwinModel(encoder_config)
    decoder = MBartForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()
    state_dict = original_model.state_dict()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)
    # verify results on scanned document
    dataset = load_dataset("hf-internal-testing/example-documents")
    image = dataset["test"][0]["image"].convert("RGB")
    tokenizer = XLMRobertaTokenizerFast.from_pretrained(model_name, from_slow=True)
    image_processor = DonutImageProcessor(
        do_align_long_axis=original_model.config.align_long_axis, size=original_model.config.input_size[::-1]
    )
    processor = DonutProcessor(image_processor, tokenizer)
    pixel_values = processor(image, return_tensors="pt").pixel_values
    if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        question = "When is the coffee break?"
        task_prompt = task_prompt.replace("{user_input}", question)
    elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
        task_prompt = "<s_rvlcdip>"
    elif model_name in [
        "naver-clova-ix/donut-base-finetuned-cord-v1",
        "naver-clova-ix/donut-base-finetuned-cord-v1-2560",
    ]:
        task_prompt = "<s_cord>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        task_prompt = "s_cord-v2>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
        task_prompt = "<s_zhtrainticket>"
    elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
        # use a random prompt
        task_prompt = "hello world"
    else:
        raise ValueError("Model name not supported")
    prompt_tensors = original_model.decoder.tokenizer(task_prompt, add_special_tokens=False, return_tensors="pt")[
        "input_ids"
    ]
    original_patch_embed = original_model.encoder.model.patch_embed(pixel_values)
    patch_embeddings, _ = model.encoder.embeddings(pixel_values)
    assert torch.allclose(original_patch_embed, patch_embeddings, atol=1e-3)
    # verify encoder hidden states
    original_last_hidden_state = original_model.encoder(pixel_values)
    last_hidden_state = model.encoder(pixel_values).last_hidden_state
    assert torch.allclose(original_last_hidden_state, last_hidden_state, atol=1e-2)
    # verify decoder logits
    original_logits = original_model(pixel_values, prompt_tensors, None).logits
    logits = model(pixel_values, decoder_input_ids=prompt_tensors).logits
    assert torch.allclose(original_logits, logits, atol=1e-3)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        model.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
        processor.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='naver-clova-ix/donut-base-finetuned-docvqa',
required=False,
type=str,
help='Name of the original model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
required=False,
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub.',
)
    args = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
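After conversion, the saved folder can be reloaded like any Hugging Face checkpoint. A minimal sketch (the dump folder path is an assumption for illustration):

from transformers import DonutProcessor, VisionEncoderDecoderModel

processor = DonutProcessor.from_pretrained("./donut-docvqa")          # assumed output folder
model = VisionEncoderDecoderModel.from_pretrained("./donut-docvqa")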
| 487
| 0
|
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
_UpperCamelCase : List[str] =R'\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `" / "`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `" // "`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `"wiki_dpr"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `"train"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `"compressed"`)\n The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and\n `"compressed"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a "dummy" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n'
@add_start_docstrings(_UpperCamelCase)
class RagConfig(PretrainedConfig):
    model_type = "rag"
    is_composition = True

    def __init__(
        self,
        vocab_size=None,
        is_encoder_decoder=True,
        prefix=None,
        bos_token_id=None,
        pad_token_id=None,
        eos_token_id=None,
        decoder_start_token_id=None,
        title_sep=" / ",
        doc_sep=" // ",
        n_docs=5,
        max_combined_length=300,
        retrieval_vector_size=768,
        retrieval_batch_size=8,
        dataset="wiki_dpr",
        dataset_split="train",
        index_name="compressed",
        index_path=None,
        passages_path=None,
        use_dummy_dataset=False,
        reduce_loss=False,
        label_smoothing=0.0,
        do_deduplication=True,
        exclude_bos_score=False,
        do_marginalize=False,
        output_retrieved=False,
        use_cache=True,
        forced_eos_token_id=None,
        **kwargs,
    ):
        super().__init__(
            bos_token_id=bos_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            prefix=prefix,
            vocab_size=vocab_size,
            **kwargs,
        )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")
        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache
        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)

    @classmethod
    def from_question_encoder_generator_configs(cls, question_encoder_config, generator_config, **kwargs):
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
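A minimal sketch of composing this config from two sub-configs (the checkpoint names below are illustrative assumptions):

from transformers import AutoConfig, RagConfig

question_encoder_config = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
generator_config = AutoConfig.from_pretrained("facebook/bart-large")
rag_config = RagConfig.from_question_encoder_generator_configs(
    question_encoder_config, generator_config, n_docs=5
)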
| 714
|
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
_UpperCamelCase : Optional[int] =logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class DepthEstimationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING)

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        return {}, {}, {}

    def preprocess(self, image):
        image = load_image(image)
        self.image_size = image.size
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs):
        predicted_depth = model_outputs.predicted_depth
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode="bicubic", align_corners=False
        )
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output)).astype("uint8")
        depth = Image.fromarray(formatted)
        output_dict = {}
        output_dict["predicted_depth"] = predicted_depth
        output_dict["depth"] = depth
        return output_dict
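A short usage sketch of the pipeline defined above (the checkpoint name and image path are assumptions for illustration):

from transformers import pipeline

depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")  # assumed checkpoint
outputs = depth_estimator("example.jpg")                                 # assumed local image path
outputs["depth"].save("depth.png")          # PIL image produced in postprocess
print(outputs["predicted_depth"].shape)     # raw depth tensor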
| 332
| 0
|
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class TrainingArguments:
    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be trained."}
    )
    save_dir: Optional[str] = field(
        default="./", metadata={"help": "Save dir where model repo is cloned and models updates are saved to."}
    )
    dataset_name_train: Optional[str] = field(
        default="codeparrot/codeparrot-clean-train", metadata={"help": "Name or path of training dataset."}
    )
    dataset_name_valid: Optional[str] = field(
        default="codeparrot/codeparrot-clean-valid", metadata={"help": "Name or path of validation dataset."}
    )
    train_batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size for training."})
    valid_batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size for evaluation."})
    weight_decay: Optional[float] = field(default=0.1, metadata={"help": "Value of weight decay."})
    shuffle_buffer: Optional[int] = field(
        default=10000, metadata={"help": "Size of buffer used to shuffle streaming dataset."}
    )
    learning_rate: Optional[float] = field(default=2e-4, metadata={"help": "Learning rate for training."})
    lr_scheduler_type: Optional[str] = field(default="cosine", metadata={"help": "Learning rate schedule type."})
    num_warmup_steps: Optional[int] = field(
        default=750, metadata={"help": "Number of warmup steps in the learning rate schedule."}
    )
    gradient_accumulation_steps: Optional[int] = field(
        default=16, metadata={"help": "Number of gradient accumulation steps."}
    )
    gradient_checkpointing: Optional[bool] = field(
        default=True, metadata={"help": "Use gradient checkpointing to reduce memory footprint."}
    )
    max_train_steps: Optional[int] = field(default=50000, metadata={"help": "Maximum number of training steps."})
    max_eval_steps: Optional[int] = field(
        default=-1, metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."}
    )
    seq_length: Optional[int] = field(default=1024, metadata={"help": "Sequence lengths used for training."})
    seed: Optional[int] = field(default=1, metadata={"help": "Training seed."})
    save_checkpoint_steps: Optional[int] = field(
        default=1024,
        metadata={"help": "Interval to save checkpoints. Measured as number of forward passes not training steps."},
    )
    resume_from_checkpoint: Optional[str] = field(
        default=None, metadata={"help": "States path if the training should continue from a checkpoint folder."}
    )
    tokenized: Optional[bool] = field(default=False, metadata={"help": "If True the data is pretokenized."})


@dataclass
class EvaluationArguments:
    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be evaluated."}
    )
    dataset_name: Optional[str] = field(
        default="codeparrot/codeparrot-clean-valid", metadata={"help": "Name or path of validation dataset."}
    )
    batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size used for evaluation."})
    max_eval_steps: Optional[int] = field(
        default=-1, metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."}
    )
    seq_length: Optional[int] = field(default=1024, metadata={"help": "Length of sequences to be evaluated."})
    seed: Optional[int] = field(default=1, metadata={"help": "Random seed used for evaluation."})


@dataclass
class HumanEvalArguments:
    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be evaluated."}
    )
    num_workers: Optional[int] = field(default=None, metadata={"help": "Number of workers used for code evaluation."})
    num_tasks: Optional[int] = field(
        default=None,
        metadata={"help": "The number of human-eval tasks to run. If not included all tasks are evaluated."},
    )
    do_sample: Optional[bool] = field(
        default=True, metadata={"help": "Sample from the language model's output distribution."}
    )
    temperature: Optional[float] = field(default=0.2, metadata={"help": "Sampling temperature used for generation."})
    max_new_tokens: Optional[int] = field(default=256, metadata={"help": "Maximum number of newly generated tokens."})
    top_k: Optional[int] = field(default=0, metadata={"help": "Top-k parameter used for generation."})
    top_p: Optional[float] = field(default=0.95, metadata={"help": "Top-p parameter used for nucleus sampling."})
    batch_size: Optional[int] = field(default=10, metadata={"help": "Number of generations to run in parallel."})
    n_samples: Optional[int] = field(
        default=200, metadata={"help": "Number of completions to generate for each sample."}
    )
    seed: Optional[int] = field(default=1, metadata={"help": "Random seed used for evaluation."})
    output_file: Optional[str] = field(
        default="eval_results.json", metadata={"help": "File where the evaluation results are saved."}
    )
    HF_ALLOW_CODE_EVAL: Optional[str] = field(
        default="0", metadata={"help": "Allow `code_eval` to execute Python code on machine"}
    )
    device_int: Optional[int] = field(
        default=-1,
        metadata={
            "help": (
                "Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive"
                " number corresponds to which GPU device id to run on."
            )
        },
    )


@dataclass
class PreprocessingArguments:
    num_workers: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of CPU cores to use for parallel preprocessing. Default uses the maximum available."
        },
    )
    dataset_name: Optional[str] = field(
        default="transformersbook/codeparrot", metadata={"help": "Folder or name of dataset to process."}
    )
    output_dir: Optional[str] = field(
        default="codeparrot-clean", metadata={"help": "Folder to save processed dataset."}
    )
    samples_per_file: Optional[int] = field(
        default=100000, metadata={"help": "Number of files to save per JSON output file."}
    )
    text_column: Optional[str] = field(default="content", metadata={"help": "Column containing text data to process."})
    line_max: Optional[float] = field(
        default=1000, metadata={"help": "Maximum line length in file, otherwise file is filtered."}
    )
    line_mean: Optional[float] = field(
        default=100, metadata={"help": "Maximum mean line length in file, otherwise file is filtered."}
    )
    alpha_frac: Optional[float] = field(
        default=0.25, metadata={"help": "Maximum fraction of non-alphanumeric characters, otherwise file is filtered."}
    )
    min_token_ratio: Optional[float] = field(
        default=1.5, metadata={"help": "Minimum character token ratio for the file, otherwise file is filtered."}
    )
    filter_proba: Optional[float] = field(
        default=0.7, metadata={"help": "Probability for filtering config, test and uncommon files."}
    )
    tokenizer: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Name or path to the tokenizer."}
    )
    near_deduplication: Optional[bool] = field(
        default=False, metadata={"help": "If True, near-duplicate samples are removed."}
    )
    jaccard_threshold: Optional[float] = field(
        default=0.85, metadata={"help": "Jaccard threshold for near-duplicate samples."}
    )


@dataclass
class TokenizerTrainingArguments:
    base_tokenizer: Optional[str] = field(
        default="gpt2", metadata={"help": "Base tokenizer to build new tokenizer from."}
    )
    dataset_name: Optional[str] = field(
        default="transformersbook/codeparrot-train", metadata={"help": "Dataset to train tokenizer on."}
    )
    text_column: Optional[str] = field(default="content", metadata={"help": "Column containing text data to process."})
    n_examples: Optional[int] = field(default=200000, metadata={"help": "Number of examples to train tokenizer on."})
    vocab_size: Optional[int] = field(default=32768, metadata={"help": "Vocabulary size of the new tokenizer."})
    tokenizer_name: Optional[str] = field(default="codeparrot", metadata={"help": "Name of new tokenizer."})
    push_to_hub: Optional[bool] = field(default=True, metadata={"help": "Push saved tokenizer to the hub."})


@dataclass
class PretokenizationArguments:
    tokenizer_dir: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Name or path to the tokenizer."}
    )
    dataset_name: Optional[str] = field(
        default="codeparrot/codeparrot-clean-train", metadata={"help": "Name or path to the dataset to pretokenize."}
    )
    tokenized_data_repo: Optional[str] = field(
        default="tokenized-codeparrot-train", metadata={"help": "Repo name of the pretokenized data."}
    )
    num_workers: Optional[int] = field(default=None, metadata={"help": "Number of workers used for pretokenization."})


@dataclass
class InitializationArguments:
    config_name: Optional[str] = field(
        default="gpt2-large", metadata={"help": "Configuration to use for model initialization."}
    )
    tokenizer_name: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Tokenizer attached to model."}
    )
    model_name: Optional[str] = field(default="codeparrot", metadata={"help": "Name of the created model."})
    push_to_hub: Optional[bool] = field(default=True, metadata={"help": "Push saved model to the hub."})
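These dataclasses are designed to be parsed with transformers' HfArgumentParser; a minimal sketch of how a training script would consume them (the script wiring itself is an assumption, not part of this file):

from transformers import HfArgumentParser

parser = HfArgumentParser(TrainingArguments)
training_args = parser.parse_args_into_dataclasses()[0]
print(training_args.model_ckpt, training_args.train_batch_size)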
| 676
|
import math
from datetime import datetime, timedelta
def gauss_easter(year):
    """Calculate the date of Easter for a given year using Gauss's algorithm."""
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30
    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7
    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday))
if __name__ == "__main__":
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
        print(f"Easter in {year} {tense} {gauss_easter(year)}")
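As a quick sanity check, the algorithm can be compared against widely published Gregorian Easter dates; a small sketch (the expected dates are standard reference values, not part of the original script):

def _verify_gauss_easter() -> None:
    # Month/day of Easter Sunday for a few sample years.
    expected = {1994: (4, 3), 2000: (4, 23), 2010: (4, 4), 2021: (4, 4), 2023: (4, 9)}
    for sample_year, (month, day) in expected.items():
        result = gauss_easter(sample_year)
        assert (result.month, result.day) == (month, day), f"{sample_year}: got {result}"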
| 648
| 0
|
'''simple docstring'''
import math
def perfect_square(num: int) -> bool:
    """Check if a number is a perfect square using math.sqrt."""
    return math.sqrt(num) * math.sqrt(num) == num
def perfect_square_binary_search(n: int) -> bool:
    """Check if a number is a perfect square using binary search."""
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
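Note that the math.sqrt-based check above can fail for very large integers because of floating-point rounding. A minimal exact-integer alternative, assuming Python 3.8+ for math.isqrt:

def perfect_square_isqrt(num: int) -> bool:
    # math.isqrt operates on exact integers, so there is no rounding error.
    if num < 0:
        return False
    root = math.isqrt(num)
    return root * root == num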
| 721
|
'''simple docstring'''
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=2 , _UpperCAmelCase=99 , _UpperCAmelCase=0 , _UpperCAmelCase=32 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=2 , _UpperCAmelCase=4 , _UpperCAmelCase="last" , _UpperCAmelCase=True , _UpperCAmelCase=None , _UpperCAmelCase=0 , ) -> Optional[int]:
snake_case__ =parent
snake_case__ =batch_size
snake_case__ =seq_length
snake_case__ =is_training
snake_case__ =use_input_lengths
snake_case__ =use_token_type_ids
snake_case__ =use_labels
snake_case__ =gelu_activation
snake_case__ =sinusoidal_embeddings
snake_case__ =causal
snake_case__ =asm
snake_case__ =n_langs
snake_case__ =vocab_size
snake_case__ =n_special
snake_case__ =hidden_size
snake_case__ =num_hidden_layers
snake_case__ =num_attention_heads
snake_case__ =hidden_dropout_prob
snake_case__ =attention_probs_dropout_prob
snake_case__ =max_position_embeddings
snake_case__ =type_sequence_label_size
snake_case__ =initializer_range
snake_case__ =num_labels
snake_case__ =num_choices
snake_case__ =summary_type
snake_case__ =use_proj
snake_case__ =scope
snake_case__ =bos_token_id
def _lowercase ( self ) -> Any:
snake_case__ =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case__ =random_attention_mask([self.batch_size, self.seq_length] )
snake_case__ =None
if self.use_input_lengths:
snake_case__ =(
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
snake_case__ =None
if self.use_token_type_ids:
snake_case__ =ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
snake_case__ =None
snake_case__ =None
snake_case__ =None
if self.use_labels:
snake_case__ =ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case__ =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case__ =ids_tensor([self.batch_size] , 2 ).float()
snake_case__ =ids_tensor([self.batch_size] , self.num_choices )
snake_case__ =self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _lowercase ( self ) -> Union[str, Any]:
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ) -> Tuple:
snake_case__ =XLMModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
snake_case__ =model(_UpperCAmelCase , lengths=_UpperCAmelCase , langs=_UpperCAmelCase )
snake_case__ =model(_UpperCAmelCase , langs=_UpperCAmelCase )
snake_case__ =model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ) -> str:
snake_case__ =XLMWithLMHeadModel(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
snake_case__ =model(_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ) -> str:
snake_case__ =XLMForQuestionAnsweringSimple(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
snake_case__ =model(_UpperCAmelCase )
snake_case__ =model(_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase )
snake_case__ =outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ) -> Dict:
snake_case__ =XLMForQuestionAnswering(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
snake_case__ =model(_UpperCAmelCase )
snake_case__ =model(
_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase , cls_index=_UpperCAmelCase , is_impossible=_UpperCAmelCase , p_mask=_UpperCAmelCase , )
snake_case__ =model(
_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase , cls_index=_UpperCAmelCase , is_impossible=_UpperCAmelCase , )
((snake_case__) , ) =result_with_labels.to_tuple()
snake_case__ =model(_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase )
((snake_case__) , ) =result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ) -> Any:
snake_case__ =XLMForSequenceClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
snake_case__ =model(_UpperCAmelCase )
snake_case__ =model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ) -> Optional[Any]:
snake_case__ =self.num_labels
snake_case__ =XLMForTokenClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
snake_case__ =model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ) -> int:
snake_case__ =self.num_choices
snake_case__ =XLMForMultipleChoice(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
snake_case__ =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case__ =token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case__ =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case__ =model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowercase ( self ) -> str:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
        return config, inputs_dict
@require_torch
class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
a_ : Optional[int] = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
a_ : Optional[Any] = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
a_ : Any = (
{
'''feature-extraction''': XLMModel,
'''fill-mask''': XLMWithLMHeadModel,
'''question-answering''': XLMForQuestionAnsweringSimple,
'''text-classification''': XLMForSequenceClassification,
'''text-generation''': XLMWithLMHeadModel,
'''token-classification''': XLMForTokenClassification,
'''zero-shot''': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Dict:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False ) -> str:
snake_case__ =super()._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
snake_case__ =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_UpperCAmelCase )
snake_case__ =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_UpperCAmelCase )
return inputs_dict
def _lowercase ( self ) -> Optional[int]:
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)
def _lowercase ( self ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def _lowercase ( self ) -> int:
snake_case__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*_UpperCAmelCase )
def _lowercase ( self ) -> Dict:
snake_case__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*_UpperCAmelCase )
def _lowercase ( self ) -> Optional[int]:
snake_case__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*_UpperCAmelCase )
def _lowercase ( self ) -> Optional[int]:
snake_case__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*_UpperCAmelCase )
def _lowercase ( self ) -> str:
snake_case__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*_UpperCAmelCase )
def _lowercase ( self ) -> Optional[int]:
snake_case__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*_UpperCAmelCase )
def _lowercase ( self ) -> str:
snake_case__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*_UpperCAmelCase )
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False , _UpperCAmelCase=1 ) -> Dict:
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
self.assertListEqual(
[isinstance(_UpperCAmelCase , _UpperCAmelCase ) for iter_attentions in attentions] , [True] * len(_UpperCAmelCase ) )
self.assertEqual(len(_UpperCAmelCase ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(_UpperCAmelCase ):
# adds PAD dummy token
snake_case__ =min_length + idx + 1
snake_case__ =min_length + idx + 1
snake_case__ =(
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(_UpperCAmelCase ) )
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False , _UpperCAmelCase=1 ) -> int:
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
self.assertListEqual(
[isinstance(_UpperCAmelCase , _UpperCAmelCase ) for iter_hidden_states in hidden_states] , [True] * len(_UpperCAmelCase ) , )
self.assertEqual(len(_UpperCAmelCase ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(_UpperCAmelCase ):
# adds PAD dummy token
snake_case__ =min_length + idx + 1
snake_case__ =(batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(_UpperCAmelCase ) , )
pass
@slow
    def test_model_from_pretrained(self) -> None:
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
@slow
    def test_lm_generate_xlm_mlm_en_2048(self) -> None:
        model = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048')
        model.to(torch_device)
        input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device)  # the president
        expected_output_ids = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
| 581
| 0
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
class TextSummarizationTool(PipelineTool):
    default_checkpoint = 'philschmid/bart-large-cnn-samsum'
    description = (
        'This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, '
        'and returns a summary of the text.'
    )
    name = 'summarizer'
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    inputs = ['text']
    outputs = ['text']

    def encode(self, text):
        return self.pre_processor(text, return_tensors='pt', truncation=True)

    def forward(self, inputs):
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
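A minimal usage sketch of the tool defined above (the input text is illustrative):

summarizer = TextSummarizationTool()
summary = summarizer("Alice: Can we move the standup to 10am? Bob: Sure, 10am works for everyone.")
print(summary)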
| 72
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
_UpperCAmelCase : Any = logging.get_logger(__name__)
class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            '''The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use YolosImageProcessor instead.''',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
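Migration sketch for the deprecation above (the checkpoint name is an assumption):

from transformers import YolosImageProcessor

image_processor = YolosImageProcessor.from_pretrained("hustvl/yolos-small")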
| 72
| 1
|
"""simple docstring"""
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
logger = logging.get_logger(__name__)
class MultiControlNetModel(ModelMixin):
    """Wrapper that applies multiple ControlNets and merges their residual outputs."""

    def __init__(self, controlnets) -> None:
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(
        self,
        sample,
        timestep,
        encoder_hidden_states,
        controlnet_cond,
        conditioning_scale,
        class_labels=None,
        timestep_cond=None,
        attention_mask=None,
        cross_attention_kwargs=None,
        guess_mode=False,
        return_dict=True,
    ) -> Union[ControlNetOutput, Tuple]:
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample,
                timestep,
                encoder_hidden_states,
                image,
                scale,
                class_labels,
                timestep_cond,
                attention_mask,
                cross_attention_kwargs,
                guess_mode,
                return_dict,
            )
            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample
        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(
        self,
        save_directory,
        is_main_process=True,
        save_function=None,
        safe_serialization=False,
        variant=None,
    ):
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save,
                is_main_process=is_main_process,
                save_function=save_function,
                safe_serialization=safe_serialization,
                variant=variant,
            )
            idx += 1
            model_path_to_save = model_path_to_save + f'_{idx}'

    @classmethod
    def from_pretrained(cls, pretrained_model_path, **kwargs):
        idx = 0
        controlnets = []
        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)
            idx += 1
            model_path_to_load = pretrained_model_path + f'_{idx}'
        logger.info(f'{len(controlnets)} controlnets loaded from {pretrained_model_path}.')
        if len(controlnets) == 0:
            raise ValueError(
                f'No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + "_0"}.'
            )
        return cls(controlnets)
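A minimal sketch of the directory convention implemented by save_pretrained/from_pretrained above (paths are assumptions; controlnet_a and controlnet_b stand for already-loaded ControlNetModel instances):

multi = MultiControlNetModel([controlnet_a, controlnet_b])
multi.save_pretrained("./my_pipeline/controlnet")   # writes ./my_pipeline/controlnet and ./my_pipeline/controlnet_1
restored = MultiControlNetModel.from_pretrained("./my_pipeline/controlnet")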
| 509
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
SCREAMING_SNAKE_CASE__ : Optional[int] = logging.get_logger(__name__)
class BeitFeatureExtractor(BeitImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use BeitImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 509
| 1
|
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class Conversation:
    def __init__(self, text=None, conversation_id=None, past_user_inputs=None, generated_responses=None):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []
        self.uuid: uuid.UUID = conversation_id
        self.past_user_inputs: List[str] = past_user_inputs
        self.generated_responses: List[str] = generated_responses
        self.new_user_input: Optional[str] = text

    def __eq__(self, other):
        if not isinstance(other, Conversation):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input(self, text, overwrite=False):
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    F"User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten "
                    F"with: \"{text}\"." )
                self.new_user_input = text
            else:
                logger.warning(
                    F"User input added while unprocessed input was existing: \"{self.new_user_input}\" new input "
                    F"ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input" )
        else:
            self.new_user_input = text

    def mark_processed(self):
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response):
        self.generated_responses.append(response)

    def iter_texts(self):
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self):
        output = F"Conversation id: {self.uuid} \n"
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += F"{name} >> {text} \n"
        return output
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        min_length_for_response (`int`, *optional*, defaults to 32):
            The minimum length (in number of tokens) for a response.
        minimum_tokens (`int`, *optional*, defaults to 10):
            The minimum length of tokens to leave for a response.
    """,
)
class ConversationalPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

    def _sanitize_parameters(
        self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs
    ):
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}
        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens
        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces
        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, conversations, num_workers=0, **kwargs):
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs

    def preprocess(self, conversation, min_length_for_response=32):
        if not isinstance(conversation, Conversation):
            raise ValueError("ConversationalPipeline, expects Conversation as inputs")
        if conversation.new_user_input is None:
            raise ValueError(
                F"Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. "
                "Add user inputs with the conversation's `add_user_input` method" )
        if hasattr(self.tokenizer, "_build_conversation_input_ids"):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)
        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}

    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        n = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(F"Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})")
            trim = max_length - minimum_tokens
            model_inputs["input_ids"] = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["attention_mask"] = model_inputs["attention_mask"][:, -trim:]
        conversation = model_inputs.pop("conversation")
        generate_kwargs["max_length"] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        output_ids = model_outputs["output_ids"]
        answer = self.tokenizer.decode(
            output_ids[0], skip_special_tokens=True, clean_up_tokenization_spaces=clean_up_tokenization_spaces
        )
        conversation = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation

    def _legacy_parse_and_tokenize(self, conversation):
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))
        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
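A minimal usage sketch of the pipeline and Conversation container defined above (the checkpoint name is an assumption):

from transformers import pipeline

chatbot = pipeline("conversational", model="microsoft/DialoGPT-medium")
conversation = Conversation("What's the best movie ever made?")
conversation = chatbot(conversation)
print(conversation.generated_responses[-1])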
| 488
|
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class TFLayoutLMvaModelTester:
def __init__( self , A_ , A_=2 , A_=3 , A_=4 , A_=2 , A_=7 , A_=True , A_=True , A_=True , A_=True , A_=99 , A_=36 , A_=2 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=16 , A_=2 , A_=0.02 , A_=6 , A_=6 , A_=3 , A_=4 , A_=None , A_=1000 , ):
'''simple docstring'''
_UpperCAmelCase : List[str] = parent
_UpperCAmelCase : List[str] = batch_size
_UpperCAmelCase : Dict = num_channels
_UpperCAmelCase : Dict = image_size
_UpperCAmelCase : Optional[Any] = patch_size
_UpperCAmelCase : Optional[int] = is_training
_UpperCAmelCase : Dict = use_input_mask
_UpperCAmelCase : str = use_token_type_ids
_UpperCAmelCase : Optional[Any] = use_labels
_UpperCAmelCase : Any = vocab_size
_UpperCAmelCase : Dict = hidden_size
_UpperCAmelCase : str = num_hidden_layers
_UpperCAmelCase : Any = num_attention_heads
_UpperCAmelCase : Union[str, Any] = intermediate_size
_UpperCAmelCase : List[Any] = hidden_act
_UpperCAmelCase : Dict = hidden_dropout_prob
_UpperCAmelCase : int = attention_probs_dropout_prob
_UpperCAmelCase : List[Any] = max_position_embeddings
_UpperCAmelCase : Dict = type_vocab_size
_UpperCAmelCase : Dict = type_sequence_label_size
_UpperCAmelCase : Optional[Any] = initializer_range
_UpperCAmelCase : int = coordinate_size
_UpperCAmelCase : Any = shape_size
_UpperCAmelCase : Optional[int] = num_labels
_UpperCAmelCase : Optional[Any] = num_choices
_UpperCAmelCase : int = scope
_UpperCAmelCase : str = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
_UpperCAmelCase : int = text_seq_length
_UpperCAmelCase : Dict = (image_size // patch_size) ** 2 + 1
_UpperCAmelCase : Dict = self.text_seq_length + self.image_seq_length
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
_UpperCAmelCase : Tuple = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
_UpperCAmelCase : List[Any] = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
_UpperCAmelCase : int = bbox[i, j, 3]
_UpperCAmelCase : Union[str, Any] = bbox[i, j, 1]
_UpperCAmelCase : Optional[Any] = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
_UpperCAmelCase : Dict = bbox[i, j, 2]
_UpperCAmelCase : str = bbox[i, j, 0]
_UpperCAmelCase : Optional[Any] = tmp_coordinate
_UpperCAmelCase : List[Any] = tf.constant(A_ )
_UpperCAmelCase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase : List[str] = None
if self.use_input_mask:
_UpperCAmelCase : Optional[int] = random_attention_mask([self.batch_size, self.text_seq_length] )
_UpperCAmelCase : Optional[Any] = None
if self.use_token_type_ids:
_UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
_UpperCAmelCase : Optional[Any] = None
_UpperCAmelCase : Dict = None
if self.use_labels:
_UpperCAmelCase : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase : Dict = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
_UpperCAmelCase : str = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def _UpperCAmelCase ( self , A_ , A_ , A_ , A_ , A_ , A_ ):
'''simple docstring'''
_UpperCAmelCase : Any = TFLayoutLMvaModel(config=A_ )
# text + image
_UpperCAmelCase : Union[str, Any] = model(A_ , pixel_values=A_ , training=A_ )
_UpperCAmelCase : List[Any] = model(
A_ , bbox=A_ , pixel_values=A_ , attention_mask=A_ , token_type_ids=A_ , training=A_ , )
_UpperCAmelCase : Union[str, Any] = model(A_ , bbox=A_ , pixel_values=A_ , training=A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
_UpperCAmelCase : Optional[int] = model(A_ , training=A_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
_UpperCAmelCase : List[str] = model({"pixel_values": pixel_values} , training=A_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForSequenceClassification(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
            training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForTokenClassification(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = 2
        model = TFLayoutLMvaForQuestionAnswering(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            training=False,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, _) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMvaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFLayoutLMvaModel,
            TFLayoutLMvaForQuestionAnswering,
            TFLayoutLMvaForSequenceClassification,
            TFLayoutLMvaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": TFLayoutLMvaForQuestionAnswering, "feature-extraction": TFLayoutLMvaModel}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            # expand each tensor from (batch, ...) to (batch, num_choices, ...);
            # scalars and non-tensor entries pass through unchanged
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
                if isinstance(v, tf.Tensor) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
                inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=tf.int32)

        return inputs_dict
    def setUp(self):
        self.model_tester = TFLayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_loss_computation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            if getattr(model, "hf_compute_loss", None):
                # The number of elements in the loss should be the same as the number of elements in the label
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                added_label = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=True)[0]
                ]
                expected_loss_size = added_label.shape.as_list()[:1]

                # Test that model correctly compute the loss with kwargs
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")
                loss = model(input_ids, **prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss when we mask some positions
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")
                if "labels" in prepared_for_class:
                    labels = prepared_for_class["labels"].numpy()
                    if len(labels.shape) > 1 and labels.shape[1] != 1:
                        labels[0] = -100
                        prepared_for_class["labels"] = tf.convert_to_tensor(labels)
                        loss = model(input_ids, **prepared_for_class)[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                        self.assertTrue(not np.any(np.isnan(loss.numpy())))

                # Test that model correctly compute the loss with a dict
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                loss = model(prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss with a tuple
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)

                # Get keys that were added with the _prepare_for_class function
                label_keys = prepared_for_class.keys() - inputs_dict.keys()
                signature = inspect.signature(model.call).parameters
                signature_names = list(signature.keys())

                # Create a dictionary holding the location of the tensors in the tuple
                tuple_index_mapping = {0: "input_ids"}
                for label_key in label_keys:
                    label_key_index = signature_names.index(label_key)
                    tuple_index_mapping[label_key_index] = label_key
                sorted_tuple_index_mapping = sorted(tuple_index_mapping.items())

                # Initialize a list with their default values, update the values and convert to a tuple
                list_input = []
                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default)
                for index, value in sorted_tuple_index_mapping:
                    list_input[index] = prepared_for_class[value]
                tuple_input = tuple(list_input)

                # Send to model
                loss = model(tuple_input[:-1])[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
    def test_model(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)

    def test_model_various_embeddings(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)

    def test_for_sequence_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels)

    def test_for_token_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels)

    def test_for_question_answering(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels)
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
class TFLayoutLMvaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = TFLayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base")

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="tf").pixel_values

        input_ids = tf.constant([[1, 2]])
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0)

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        # verify the logits
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]])
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
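# For reference, LayoutLMv3 expects token bounding boxes in the 0-1000
# normalized (x1, y1, x2, y2) convention; the [[1, 2, 3, 4], [5, 6, 7, 8]]
# boxes above are toy values that simply fall inside that range.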
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MRA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}


class MraConfig(PretrainedConfig):
    model_type = "mra"

    def __init__(
        self,
        vocab_size=50_265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        position_embedding_type="absolute",
        block_per_row=4,
        approx_mode="full",
        initial_prior_first_n_blocks=0,
        initial_prior_diagonal_n_blocks=0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
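# Usage sketch (illustrative, not part of the original module): MraConfig()
# reproduces the uw-madison/mra-base-512-4 defaults, and the MRA-specific
# approximation knobs can be overridden, e.g.
#   config = MraConfig(block_per_row=2, approx_mode="sparse")
#   config.num_hidden_layers  # -> 12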
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
TINY_BART = "sshleifer/bart-tiny-random"
TINY_T5 = "patrickvonplaten/t5-tiny-random"


@require_torch
class MakeStudentTester(unittest.TestCase):
    @cached_property
    def teacher_config(self):
        return AutoConfig.from_pretrained(TINY_BART)

    def test_valid_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.num_hidden_layers, 1)

    def test_asymmetric_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None)

    def test_same_decoder_small_encoder(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers)

    def test_small_enc_small_dec(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, 1)

    def test_raises_assert(self):
        with self.assertRaises(AssertionError):
            create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None)
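# Reading of these tests (summary, hedged): e and d set how many encoder and
# decoder layers the student copies from the teacher; d=None keeps the
# teacher's full decoder, which is why test_same_decoder_small_encoder compares
# decoder_layers against the teacher's layer count, and passing None for both
# e and d is rejected.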
from __future__ import annotations
from random import choice
def random_pivot(lst: list[int]) -> int:
    return choice(lst)


def kth_number(lst: list[int], k: int) -> int:
    pivot = random_pivot(lst)
    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]
    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)
if __name__ == "__main__":
import doctest
doctest.testmod()
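    # Illustrative checks (assumes distinct elements: values equal to the
    # pivot are dropped by the partition above, so duplicates can be lost):
    assert kth_number([7, 2, 9, 4], 1) == 2
    assert kth_number([7, 2, 9, 4], 3) == 7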
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( snake_case : Tuple )-> str:
'''simple docstring'''
UpperCAmelCase__ : List[str] = len(snake_case )
for i in range(length - 1 ):
UpperCAmelCase__ : Any = i
for k in range(i + 1 , snake_case ):
if collection[k] < collection[least]:
UpperCAmelCase__ : Union[str, Any] = k
if least != i:
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = (collection[i], collection[least])
return collection
if __name__ == "__main__":
_lowerCAmelCase : List[str] = input("""Enter numbers separated by a comma:\n""").strip()
_lowerCAmelCase : Optional[int] = [int(item) for item in user_input.split(""",""")]
print(selection_sort(unsorted))
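# Worked example: selection_sort([5, 2, 9, 1]) swaps the minimum of the
# unsorted suffix into place on each pass:
# [5, 2, 9, 1] -> [1, 2, 9, 5] -> [1, 2, 9, 5] -> [1, 2, 5, 9]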
def equation(x: float) -> float:
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    # Bolzano's theorem: a sign change over [a, b] guarantees a root in between
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
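    # Both calls bracket the positive root of 10 - x**2, so each print should
    # show a value near sqrt(10) ≈ 3.1623 (to within the 0.01 stopping width).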
from ..utils import DummyObject, requires_backends
# The dummy-objects module repeats this placeholder pattern once per
# torch-backed class in the public API; one representative is kept here.
class lowerCAmelCase__(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
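# Mechanically (summary of the library's dummy-object pattern, stated here as
# an assumption about the utils module): DummyObject is a metaclass whose
# attribute access and instantiation route through requires_backends, which
# raises an ImportError naming the missing backend ("torch" here), so importing
# the package still works without torch and the failure surfaces on first use.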
# The module likewise defines one placeholder function per torch-backed
# helper; a single stub stands in for the repeated pattern.
def A__(*args, **kwargs):
    requires_backends(A__, ["torch"])
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class __A( unittest.TestCase ):
    def setUp(self):
        vocab = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.add_kwargs_tokens_map = {
            "unk_token": "<unk>",
            "bos_token": "<s>",
            "eos_token": "</s>",
        }
        feature_extractor_map = {
            "feature_size": 1,
            "padding_value": 0.0,
            "sampling_rate": 16_000,
            "return_attention_mask": False,
            "do_normalize": True,
        }

        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.feature_extraction_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(feature_extractor_map) + "\n")

        # load decoder from hub
        self.decoder_name = "hf-internal-testing/ngram-beam-search-decoder"

    def get_tokenizer(self, **kwargs_init):
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init)
        return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)

    def get_decoder(self, **kwargs):
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        decoder = self.get_decoder()
        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)
        processor.save_pretrained(self.tmpdirname)
        processor = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname)

        # tokenizer
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, WavaVecaCTCTokenizer)

        # feature extractor
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, WavaVecaFeatureExtractor)

        # decoder
        self.assertEqual(processor.decoder._alphabet.labels, decoder._alphabet.labels)
        self.assertEqual(
            processor.decoder.model_container[decoder._model_key]._unigram_set,
            decoder.model_container[decoder._model_key]._unigram_set,
        )
        self.assertIsInstance(processor.decoder, BeamSearchDecoderCTC)
    def test_save_load_pretrained_additional_features(self):
        processor = WavaVecaProcessorWithLM(
            tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder())
        processor.save_pretrained(self.tmpdirname)

        # the decoder-level parameters can be overridden at load time
        processor = WavaVecaProcessorWithLM.from_pretrained(
            self.tmpdirname, alpha=5.0, beta=3.0, score_boundary=-7.0, unk_score_offset=3)

        # decoder
        self.assertEqual(processor.language_model.alpha, 5.0)
        self.assertEqual(processor.language_model.beta, 3.0)
        self.assertEqual(processor.language_model.score_boundary, -7.0)
        self.assertEqual(processor.language_model.unk_score_offset, 3)
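        # alpha and beta are pyctcdecode's shallow-fusion knobs (the LM weight
        # and the word-insertion/length score); from_pretrained forwards them
        # to the decoder's language model, which the assertions above verify.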
    def test_load_decoder_tokenizer_mismatch_raises(self):
        tokenizer = self.get_tokenizer()
        # add token to trigger raise
        tokenizer.add_tokens(["xx"])
        with self.assertRaisesRegex(ValueError, "include"):
            WavaVecaProcessorWithLM(
                tokenizer=tokenizer, feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder())

    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()
        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        raw_speech = floats_list((3, 1_000))
        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(raw_speech, return_tensors="np")
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()
        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        input_str = "This is a test string"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def _get_dummy_logits(self, shape=(2, 10, 16), seed=77):
        np.random.seed(seed)
        return np.random.rand(*shape)
    def test_decoder(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()
        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits(shape=(10, 16), seed=13)
        decoded_processor = processor.decode(logits)
        decoded_decoder = decoder.decode_beams(logits)[0]

        self.assertEqual(decoded_decoder[0], decoded_processor.text)
        self.assertEqual("</s> <s> </s>", decoded_processor.text)
        self.assertEqual(decoded_decoder[-2], decoded_processor.logit_score)
        self.assertEqual(decoded_decoder[-1], decoded_processor.lm_score)
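        # each pyctcdecode beam is a tuple of the form
        # (text, last_lm_state, text_frames, logit_score, lm_score),
        # hence the [0], [-2] and [-1] indexing above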
    @parameterized.expand([[None], ["fork"], ["spawn"]])
    def test_batch_decode(self, pool_context):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()
        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
        # otherwise, the LM won't be available to the pool's sub-processes.
        # manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
        if pool_context is None:
            decoded_processor = processor.batch_decode(logits)
        else:
            with get_context(pool_context).Pool() as pool:
                decoded_processor = processor.batch_decode(logits, pool)

        logits_list = list(logits)
        with get_context("fork").Pool() as p:
            decoded_beams = decoder.decode_beams_batch(p, logits_list)

        texts_decoder, logit_scores_decoder, lm_scores_decoder = [], [], []
        for beams in decoded_beams:
            texts_decoder.append(beams[0][0])
            logit_scores_decoder.append(beams[0][-2])
            lm_scores_decoder.append(beams[0][-1])

        self.assertListEqual(texts_decoder, decoded_processor.text)
        self.assertListEqual(["<s> <s> </s>", "<s> <s> <s>"], decoded_processor.text)
        self.assertListEqual(logit_scores_decoder, decoded_processor.logit_score)
        self.assertListEqual(lm_scores_decoder, decoded_processor.lm_score)
def lowercase__ ( self : Tuple ):
lowerCamelCase_ = self.get_feature_extractor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = self.get_decoder()
lowerCamelCase_ = WavaVecaProcessorWithLM(tokenizer=__UpperCamelCase , feature_extractor=__UpperCamelCase , decoder=__UpperCamelCase )
lowerCamelCase_ = self._get_dummy_logits()
lowerCamelCase_ = 1_5
lowerCamelCase_ = -20.0
lowerCamelCase_ = -4.0
lowerCamelCase_ = processor.batch_decode(
__UpperCamelCase , beam_width=__UpperCamelCase , beam_prune_logp=__UpperCamelCase , token_min_logp=__UpperCamelCase , )
lowerCamelCase_ = decoded_processor_out.text
lowerCamelCase_ = list(__UpperCamelCase )
with get_context("""fork""" ).Pool() as pool:
lowerCamelCase_ = decoder.decode_beams_batch(
__UpperCamelCase , __UpperCamelCase , beam_width=__UpperCamelCase , beam_prune_logp=__UpperCamelCase , token_min_logp=__UpperCamelCase , )
lowerCamelCase_ = [d[0][0] for d in decoded_decoder_out]
lowerCamelCase_ = [d[0][2] for d in decoded_decoder_out]
lowerCamelCase_ = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
self.assertListEqual(["""</s> <s> <s>""", """<s> <s> <s>"""] , __UpperCamelCase )
self.assertTrue(np.array_equal(__UpperCamelCase , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , __UpperCamelCase , atol=1E-3 ) )
self.assertTrue(np.array_equal(__UpperCamelCase , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9474] , __UpperCamelCase , atol=1E-3 ) )
def lowercase__ ( self : Optional[int] ):
lowerCamelCase_ = self.get_feature_extractor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = self.get_decoder()
lowerCamelCase_ = WavaVecaProcessorWithLM(tokenizer=__UpperCamelCase , feature_extractor=__UpperCamelCase , decoder=__UpperCamelCase )
lowerCamelCase_ = self._get_dummy_logits()
lowerCamelCase_ = 2.0
lowerCamelCase_ = 5.0
lowerCamelCase_ = -20.0
lowerCamelCase_ = True
lowerCamelCase_ = processor.batch_decode(
__UpperCamelCase , alpha=__UpperCamelCase , beta=__UpperCamelCase , unk_score_offset=__UpperCamelCase , lm_score_boundary=__UpperCamelCase , )
lowerCamelCase_ = decoded_processor_out.text
lowerCamelCase_ = list(__UpperCamelCase )
decoder.reset_params(
alpha=__UpperCamelCase , beta=__UpperCamelCase , unk_score_offset=__UpperCamelCase , lm_score_boundary=__UpperCamelCase , )
with get_context("""fork""" ).Pool() as pool:
lowerCamelCase_ = decoder.decode_beams_batch(
__UpperCamelCase , __UpperCamelCase , )
lowerCamelCase_ = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
self.assertListEqual(["""<s> </s> <s> </s> </s>""", """</s> </s> <s> </s> </s>"""] , __UpperCamelCase )
lowerCamelCase_ = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , __UpperCamelCase )
def lowercase__ ( self : List[str] ):
lowerCamelCase_ = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
lowerCamelCase_ = processor.decoder.model_container[processor.decoder._model_key]
lowerCamelCase_ = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
lowerCamelCase_ = os.listdir(__UpperCamelCase )
lowerCamelCase_ = ["""alphabet.json""", """language_model"""]
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
def lowercase__ ( self : List[Any] ):
lowerCamelCase_ = snapshot_download("""hf-internal-testing/processor_with_lm""" )
lowerCamelCase_ = WavaVecaProcessorWithLM.from_pretrained(__UpperCamelCase )
lowerCamelCase_ = processor.decoder.model_container[processor.decoder._model_key]
lowerCamelCase_ = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
lowerCamelCase_ = os.listdir(__UpperCamelCase )
lowerCamelCase_ = os.listdir(__UpperCamelCase )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
def lowercase__ ( self : List[str] ):
lowerCamelCase_ = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
lowerCamelCase_ = AutoProcessor.from_pretrained("""hf-internal-testing/processor_with_lm""" )
lowerCamelCase_ = floats_list((3, 1_0_0_0) )
lowerCamelCase_ = processor_wavaveca(__UpperCamelCase , return_tensors="""np""" )
lowerCamelCase_ = processor_auto(__UpperCamelCase , return_tensors="""np""" )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
lowerCamelCase_ = self._get_dummy_logits()
lowerCamelCase_ = processor_wavaveca.batch_decode(__UpperCamelCase )
lowerCamelCase_ = processor_auto.batch_decode(__UpperCamelCase )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()
        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)
        self.assertListEqual(
            processor.model_input_names,
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )
    @staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
def lowercase__ ( self : Any ):
lowerCamelCase_ = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
lowerCamelCase_ = self._get_dummy_logits()[0]
lowerCamelCase_ = processor.decode(__UpperCamelCase , output_word_offsets=__UpperCamelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(__UpperCamelCase , __UpperCamelCase ) )
self.assertEqual(""" """.join(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """end_offset""" ) , [1, 3, 5] )
def lowercase__ ( self : Dict ):
lowerCamelCase_ = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
lowerCamelCase_ = self._get_dummy_logits()
lowerCamelCase_ = processor.batch_decode(__UpperCamelCase , output_word_offsets=__UpperCamelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(__UpperCamelCase , __UpperCamelCase ) )
self.assertListEqual(
[""" """.join(self.get_from_offsets(__UpperCamelCase , """word""" ) ) for o in outputs["""word_offsets"""]] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """end_offset""" ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
    def test_word_time_stamp_integration(self):
        import torch

        ds = load_dataset("common_voice", "en", split="train", streaming=True)
        ds = ds.cast_column("audio", datasets.Audio(sampling_rate=16_000))
        ds_iter = iter(ds)
        sample = next(ds_iter)
        processor = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")
        model = WavaVecaForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")
        # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
        input_values = processor(sample["audio"]["array"], return_tensors="pt").input_values
        with torch.no_grad():
            logits = model(input_values).logits.cpu().numpy()
        output = processor.decode(logits[0], output_word_offsets=True)
        # one logit frame corresponds to `inputs_to_logits_ratio` input samples
        time_offset = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
        word_time_stamps = [
            {
                "start_time": d["start_offset"] * time_offset,
                "end_time": d["end_offset"] * time_offset,
                "word": d["word"],
            }
            for d in output["word_offsets"]
        ]
        EXPECTED_TEXT = "WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"
        # output words
        self.assertEqual(" ".join(self.get_from_offsets(word_time_stamps, "word")), EXPECTED_TEXT)
        self.assertEqual(" ".join(self.get_from_offsets(word_time_stamps, "word")), output.text)
        # output times
        start_times = torch.tensor(self.get_from_offsets(word_time_stamps, "start_time"))
        end_times = torch.tensor(self.get_from_offsets(word_time_stamps, "end_time"))
        # fmt: off
        expected_start_tensor = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599])
        expected_end_tensor = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94])
        # fmt: on
        self.assertTrue(torch.allclose(start_times, expected_start_tensor, atol=0.01))
        self.assertTrue(torch.allclose(end_times, expected_end_tensor, atol=0.01))
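    # Worked example of the timestamp arithmetic above (illustrative; assumes the
    # base wav2vec2 architecture): the feature encoder downsamples raw audio by a
    # factor of 320 and the feature extractor samples at 16 kHz, so
    # time_offset = 320 / 16_000 = 0.02 s. Each logit frame therefore spans 20 ms,
    # which is why the integer start/end offsets are multiplied by time_offset to
    # obtain word boundaries in seconds.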
| 272
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/mbart-large-en-ro''': (
'''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'''
),
'''facebook/mbart-large-cc25''': (
'''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/mbart-large-en-ro''': '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json''',
'''facebook/mbart-large-cc25''': '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/mbart-large-en-ro''': 1_0_2_4,
'''facebook/mbart-large-cc25''': 1_0_2_4,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class MBartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MBartTokenizer
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", src_lang=None, tgt_lang=None, additional_special_tokens=None, **kwargs):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens, **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _build_translation_inputs(self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
    def prepare_seq2seq_batch(self, src_texts: List[str], src_lang: str = "en_XX", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "ro_RO", **kwargs) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)
    def set_src_lang_special_tokens(self, src_lang) -> None:
        # Reset the special tokens to the source language setting: no prefix, suffix = [eos, src_lang_code].
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str, pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )
    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        # Reset the special tokens to the target language setting: no prefix, suffix = [eos, tgt_lang_code].
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str, pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
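# Minimal usage sketch (illustrative; assumes the hub checkpoints above are
# reachable), showing how the language codes drive the special-token templates:
#
#     tokenizer = MBartTokenizerFast.from_pretrained(
#         "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
#     )
#     batch = tokenizer("UN Chief Says There Is No Plan to Stop War in Syria")
#     # input_ids end with [..., eos_token_id, id("en_XX")], matching
#     # set_src_lang_special_tokens: prefix_tokens = [], suffix_tokens = [eos, lang_code]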
| 272
| 1
|
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
SCHEDULER_CONFIG_NAME = "scheduler_config.json"


class FlaxKarrasDiffusionSchedulers(Enum):
    FlaxDDIMScheduler = 1
    FlaxDDPMScheduler = 2
    FlaxPNDMScheduler = 3
    FlaxLMSDiscreteScheduler = 4
    FlaxDPMSolverMultistepScheduler = 5
@dataclass
class FlaxSchedulerOutput(BaseOutput):
    """Base class for the output of a scheduler's step function."""

    prev_sample: jnp.ndarray
class FlaxSchedulerMixin:
    config_name = SCHEDULER_CONFIG_NAME
    ignore_for_config = ["dtype"]
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Dict[str, Any] = None, subfolder: Optional[str] = None, return_unused_kwargs=False, **kwargs):
        config, kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path, subfolder=subfolder, return_unused_kwargs=True, **kwargs,
        )
        scheduler, unused_kwargs = cls.from_config(config, return_unused_kwargs=True, **kwargs)
        if hasattr(scheduler, "create_state") and getattr(scheduler, "has_state", False):
            state = scheduler.create_state()
        if return_unused_kwargs:
            return scheduler, state, unused_kwargs
        return scheduler, state

    def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
def broadcast_to_shape_from_left(x: jnp.ndarray, shape: Tuple[int]) -> jnp.ndarray:
    # Append singleton axes on the right of `x` until it broadcasts against `shape`.
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)
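# Shape sketch (illustrative, not part of the original module): a per-timestep
# vector of shape (2,) broadcast against a sample batch of shape (2, 3, 4) is
# first reshaped to (2, 1, 1), then broadcast, so each scalar multiplies one
# full sample:
#
#     x = jnp.ones(2)
#     broadcast_to_shape_from_left(x, (2, 3, 4)).shape  # (2, 3, 4)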
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, dtype=jnp.float32) -> jnp.ndarray:
    def alpha_bar(time_step):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return jnp.array(betas, dtype=dtype)
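# What betas_for_alpha_bar computes (illustrative summary): alpha_bar(t) is the
# cosine schedule cos((t + 0.008) / 1.008 * pi / 2) ** 2, and each beta is
# beta_i = 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), clipped at max_beta, so the
# cumulative product of (1 - beta_i) tracks alpha_bar:
#
#     betas = betas_for_alpha_bar(1000)
#     betas.shape  # (1000,)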
@flax.struct.dataclass
class CommonSchedulerState:
    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray

    @classmethod
    def create(cls, scheduler):
        config = scheduler.config
        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype
                )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                f"beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}"
            )
        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas, axis=0)
        return cls(alphas=alphas, betas=betas, alphas_cumprod=alphas_cumprod)
def get_sqrt_alpha_prod(state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    alphas_cumprod = state.alphas_cumprod
    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)
    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)
    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod


def add_noise_common(state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples


def get_velocity_common(state: CommonSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
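# The two helpers above implement the standard DDPM forward process (summary,
# using the notation alpha_bar_t = alphas_cumprod[t]):
#
#     add_noise:     x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps
#     get_velocity:  v_t = sqrt(alpha_bar_t) * eps - sqrt(1 - alpha_bar_t) * x_0
#
# get_sqrt_alpha_prod just gathers the two per-timestep coefficients and
# broadcasts them to the sample shape.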
| 116
|
def manhattan_distance(point_a: list, point_b: list) -> float:
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list) -> None:
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))
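# Illustrative checks (not part of the original module):
#
#     manhattan_distance([1, 1], [2, 2])              # 2.0  -> |1-2| + |1-2|
#     manhattan_distance([1, 3, 9, 2], [3, 3, 0, 9])  # 18.0 -> 2 + 0 + 9 + 7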
if __name__ == "__main__":
import doctest
doctest.testmod()
| 116
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"""google/switch-base-8""": """https://huggingface.co/google/switch-base-8/blob/main/config.json""",
}
class _UpperCamelCase ( UpperCAmelCase_ ):
'''simple docstring'''
_A = "switch_transformers"
_A = ["past_key_values"]
_A = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
def __init__( self : int , SCREAMING_SNAKE_CASE_ : str=3_2_1_2_8 , SCREAMING_SNAKE_CASE_ : Optional[int]=7_6_8 , SCREAMING_SNAKE_CASE_ : Tuple=6_4 , SCREAMING_SNAKE_CASE_ : int=2_0_4_8 , SCREAMING_SNAKE_CASE_ : str=6_4 , SCREAMING_SNAKE_CASE_ : Optional[Any]=1_2 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=3 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=1_2 , SCREAMING_SNAKE_CASE_ : Optional[int]=3 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=1_2 , SCREAMING_SNAKE_CASE_ : List[str]=8 , SCREAMING_SNAKE_CASE_ : List[str]=False , SCREAMING_SNAKE_CASE_ : str=0.01 , SCREAMING_SNAKE_CASE_ : Optional[int]="float32" , SCREAMING_SNAKE_CASE_ : Union[str, Any]=False , SCREAMING_SNAKE_CASE_ : Optional[Any]=3_2 , SCREAMING_SNAKE_CASE_ : Optional[Any]=1_2_8 , SCREAMING_SNAKE_CASE_ : Dict=0.1 , SCREAMING_SNAKE_CASE_ : str=1e-6 , SCREAMING_SNAKE_CASE_ : Optional[int]=0.001 , SCREAMING_SNAKE_CASE_ : List[Any]=0.001 , SCREAMING_SNAKE_CASE_ : Any=1.0 , SCREAMING_SNAKE_CASE_ : Union[str, Any]="relu" , SCREAMING_SNAKE_CASE_ : Optional[Any]=True , SCREAMING_SNAKE_CASE_ : Any=False , SCREAMING_SNAKE_CASE_ : List[Any]=True , SCREAMING_SNAKE_CASE_ : Optional[int]=0 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=1 , **SCREAMING_SNAKE_CASE_ : Optional[Any] , ):
_a = vocab_size
_a = d_model
_a = d_kv
_a = d_ff
_a = num_sparse_encoder_layers
_a = num_layers
_a = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
_a = num_sparse_decoder_layers
        # Determines how often a sparse layer appears among the encoder layers.
if self.num_sparse_encoder_layers > 0:
_a = self.num_layers // self.num_sparse_encoder_layers
else:
_a = self.num_layers # HACK: this will create 0 sparse layers
        # Determines how often a sparse layer appears among the decoder layers.
if self.num_sparse_decoder_layers > 0:
_a = self.num_decoder_layers // self.num_sparse_decoder_layers
else:
_a = self.num_decoder_layers # HACK: this will create 0 sparse layers
_a = num_heads
_a = num_experts
_a = expert_capacity
_a = router_bias
_a = router_jitter_noise
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(f"""`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}""" )
_a = router_dtype
_a = router_ignore_padding_tokens
_a = relative_attention_num_buckets
_a = relative_attention_max_distance
_a = dropout_rate
_a = layer_norm_epsilon
_a = initializer_factor
_a = feed_forward_proj
_a = use_cache
_a = add_router_probs
_a = router_z_loss_coef
_a = router_aux_loss_coef
_a = self.feed_forward_proj.split('-' )
_a = act_info[-1]
_a = act_info[0] == 'gated'
if len(a_ ) > 1 and act_info[0] != "gated" or len(a_ ) > 2:
raise ValueError(
f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
'\'gated-gelu\' or \'relu\'' )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
_a = 'gelu_new'
super().__init__(
pad_token_id=a_ , eos_token_id=a_ , is_encoder_decoder=a_ , **a_ , )
| 562
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mra"] = [
"""MRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MraForMaskedLM""",
"""MraForMultipleChoice""",
"""MraForQuestionAnswering""",
"""MraForSequenceClassification""",
"""MraForTokenClassification""",
"""MraLayer""",
"""MraModel""",
"""MraPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 165
| 0
|
from bisect import bisect
from itertools import accumulate
def frac_knapsack(vl, wt, w, n):
    # Greedy fractional knapsack: sort items by value/weight ratio, take whole
    # items until the next one no longer fits, then take a fraction of it.
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )
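# Worked example (illustrative): values [60, 100, 120], weights [10, 20, 30],
# capacity 50, n=3. The first two items fit whole (60 + 100, weight 30); of the
# last item only 20/30 fits, contributing 120 * 20 / 30 = 80, so:
#
#     frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3)  # 240.0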
if __name__ == "__main__":
import doctest
doctest.testmod()
| 706
|
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class a__ :
A = 42 # [batch_size x 3]
A = 42 # [batch_size x 3]
A = 42 # [batch_size x 3]
A = 42 # [batch_size x 3]
A = 42
A = 42
A = 42
A = 42
A = 42
def __UpperCamelCase ( self : int ):
"""simple docstring"""
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
return torch.from_numpy(np.array([self.width, self.height],dtype=np.floataa ) )
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
return torch.from_numpy(np.array([self.x_fov, self.y_fov],dtype=np.floataa ) )
def __UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.arange(self.height * self.width )
SCREAMING_SNAKE_CASE_ : str = torch.stack(
[
pixel_indices % self.width,
torch.div(_A,self.width,rounding_mode="trunc" ),
],axis=1,)
return coords
@property
def __UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ : List[str] = self.shape
SCREAMING_SNAKE_CASE_ : Any = int(np.prod(_A ) )
SCREAMING_SNAKE_CASE_ : str = self.get_image_coords()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.broadcast_to(coords.unsqueeze(0 ),[batch_size * inner_batch_size, *coords.shape] )
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.get_camera_rays(_A )
SCREAMING_SNAKE_CASE_ : Any = rays.view(_A,inner_batch_size * self.height * self.width,2,3 )
return rays
def __UpperCamelCase ( self : Tuple,_A : torch.Tensor ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
SCREAMING_SNAKE_CASE_ : Tuple = coords.view(_A,-1,2 )
SCREAMING_SNAKE_CASE_ : List[str] = self.resolution()
SCREAMING_SNAKE_CASE_ : List[str] = self.fov()
SCREAMING_SNAKE_CASE_ : Optional[Any] = (flat.float() / (res - 1)) * 2 - 1
SCREAMING_SNAKE_CASE_ : Optional[int] = fracs * torch.tan(fov / 2 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = fracs.view(_A,-1,2 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = (
self.z.view(_A,1,3 )
+ self.x.view(_A,1,3 ) * fracs[:, :, :1]
+ self.y.view(_A,1,3 ) * fracs[:, :, 1:]
)
SCREAMING_SNAKE_CASE_ : Any = directions / directions.norm(dim=-1,keepdim=_A )
SCREAMING_SNAKE_CASE_ : Tuple = torch.stack(
[
torch.broadcast_to(self.origin.view(_A,1,3 ),[batch_size, directions.shape[1], 3] ),
directions,
],dim=2,)
return rays.view(_A,*_A,2,3 )
def __UpperCamelCase ( self : Optional[int],_A : int,_A : int ):
"""simple docstring"""
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin,x=self.x,y=self.y,z=self.z,width=_A,height=_A,x_fov=self.x_fov,y_fov=self.y_fov,)
def _snake_case ( lowerCAmelCase : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = []
SCREAMING_SNAKE_CASE_ : Any = []
SCREAMING_SNAKE_CASE_ : List[Any] = []
SCREAMING_SNAKE_CASE_ : int = []
for theta in np.linspace(0 , 2 * np.pi , num=2_0 ):
SCREAMING_SNAKE_CASE_ : Dict = np.array([np.sin(lowerCAmelCase ), np.cos(lowerCAmelCase ), -0.5] )
z /= np.sqrt(np.sum(z**2 ) )
SCREAMING_SNAKE_CASE_ : Dict = -z * 4
SCREAMING_SNAKE_CASE_ : str = np.array([np.cos(lowerCAmelCase ), -np.sin(lowerCAmelCase ), 0.0] )
SCREAMING_SNAKE_CASE_ : int = np.cross(lowerCAmelCase , lowerCAmelCase )
origins.append(lowerCAmelCase )
xs.append(lowerCAmelCase )
ys.append(lowerCAmelCase )
zs.append(lowerCAmelCase )
return DifferentiableProjectiveCamera(
origin=torch.from_numpy(np.stack(lowerCAmelCase , axis=0 ) ).float() , x=torch.from_numpy(np.stack(lowerCAmelCase , axis=0 ) ).float() , y=torch.from_numpy(np.stack(lowerCAmelCase , axis=0 ) ).float() , z=torch.from_numpy(np.stack(lowerCAmelCase , axis=0 ) ).float() , width=lowerCAmelCase , height=lowerCAmelCase , x_fov=0.7 , y_fov=0.7 , shape=(1, len(lowerCAmelCase )) , )
| 316
| 0
|
"""simple docstring"""
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--bert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 516
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_lilt': ['LILT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LiltConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_lilt"] = [
'LILT_PRETRAINED_MODEL_ARCHIVE_LIST',
'LiltForQuestionAnswering',
'LiltForSequenceClassification',
'LiltForTokenClassification',
'LiltModel',
'LiltPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 516
| 1
|
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
__A : int = ' \"""\n Output class for the scheduler\'s step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"""\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n'
class __UpperCamelCase ( unittest.TestCase ):
def a__ ( self :Dict ):
snake_case_ : Tuple = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir ,"""schedulers/""" ) )
snake_case_ : List[Any] = self.diffusers_dir
shutil.copy(
os.path.join(_UpperCamelCase ,"""src/diffusers/schedulers/scheduling_ddpm.py""" ) ,os.path.join(self.diffusers_dir ,"""schedulers/scheduling_ddpm.py""" ) ,)
def a__ ( self :Dict ):
snake_case_ : List[Any] = """src/diffusers"""
shutil.rmtree(self.diffusers_dir )
def a__ ( self :Tuple ,_UpperCamelCase :str ,_UpperCamelCase :Optional[int] ,_UpperCamelCase :Tuple ,_UpperCamelCase :List[Any]=None ):
snake_case_ : List[str] = comment + F'''\nclass {class_name}(nn.Module):\n''' + class_code
if overwrite_result is not None:
snake_case_ : Dict = comment + F'''\nclass {class_name}(nn.Module):\n''' + overwrite_result
snake_case_ : List[str] = black.Mode(target_versions={black.TargetVersion.PYaa} ,line_length=1_1_9 )
snake_case_ : Any = black.format_str(_UpperCamelCase ,mode=_UpperCamelCase )
snake_case_ : Dict = os.path.join(self.diffusers_dir ,"""new_code.py""" )
with open(_UpperCamelCase ,"""w""" ,newline="""\n""" ) as f:
f.write(_UpperCamelCase )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(_UpperCamelCase ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name ,overwrite=_UpperCamelCase )
with open(_UpperCamelCase ,"""r""" ) as f:
self.assertTrue(f.read() ,_UpperCamelCase )
def a__ ( self :int ):
snake_case_ : Union[str, Any] = check_copies.find_code_in_diffusers("""schedulers.scheduling_ddpm.DDPMSchedulerOutput""" )
self.assertEqual(_UpperCamelCase ,_UpperCamelCase )
def a__ ( self :List[str] ):
# Base copy consistency
self.check_copy_consistency(
"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput""" ,"""DDPMSchedulerOutput""" ,REFERENCE_CODE + """\n""" ,)
# With no empty line at the end
self.check_copy_consistency(
"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput""" ,"""DDPMSchedulerOutput""" ,_UpperCamelCase ,)
# Copy consistency with rename
self.check_copy_consistency(
"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test""" ,"""TestSchedulerOutput""" ,re.sub("""DDPM""" ,"""Test""" ,_UpperCamelCase ) ,)
# Copy consistency with a really long name
snake_case_ : Optional[int] = """TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"""
self.check_copy_consistency(
F'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}''' ,F'''{long_class_name}SchedulerOutput''' ,re.sub("""Bert""" ,_UpperCamelCase ,_UpperCamelCase ) ,)
# Copy consistency with overwrite
self.check_copy_consistency(
"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test""" ,"""TestSchedulerOutput""" ,_UpperCamelCase ,overwrite_result=re.sub("""DDPM""" ,"""Test""" ,_UpperCamelCase ) ,)
| 712
|
'''simple docstring'''
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
__A : List[Any] = logging.get_logger(__name__)
def copy_layers(src_layers: nn.ModuleList, dest_layers: nn.ModuleList, layers_to_copy: List[int]) -> None:
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy])
    assert len(dest_layers) == len(layers_to_copy), f"{len(dest_layers)} != {len(layers_to_copy)}"
    dest_layers.load_state_dict(layers_to_copy.state_dict())
LAYERS_TO_COPY = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
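# Reading the map above (illustrative): distilling a 12-layer teacher such as
# BART down to a 3-layer student copies teacher layers [0, 6, 11], i.e. the
# first, a middle, and the last layer, rather than simply the first three.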
LAYERS_TO_SUPERVISE = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def pick_layers_to_copy(n_student, n_teacher):
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
                f" {n_student}"
            )
        return list(range(n_student))
def get_layers_to_supervise(n_student, n_teacher):
    if n_student > n_teacher:
        raise ValueError(f"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}")
    elif n_teacher == n_student:
        return list(range(n_teacher))
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]
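# Illustrative call: get_layers_to_supervise(n_student=3, n_teacher=12) returns
# [3, 7, 11], so each student layer is trained against a teacher layer spread
# across the depth of the network.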
def UpperCAmelCase ( lowerCamelCase_ :Union[str, PreTrainedModel] , lowerCamelCase_ :Union[str, Path] = "student" , lowerCamelCase_ :Union[int, None] = None , lowerCamelCase_ :Union[int, None] = None , lowerCamelCase_ :List[str]=False , lowerCamelCase_ :Optional[int]=None , lowerCamelCase_ :Optional[Any]=None , **lowerCamelCase_ :Dict , ):
'''simple docstring'''
snake_case_ : Optional[Any] = """encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."""
assert (e is not None) or (d is not None), _msg
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
AutoTokenizer.from_pretrained(lowerCamelCase_ ).save_pretrained(lowerCamelCase_ ) # purely for convenience
snake_case_ : List[Any] = AutoModelForSeqaSeqLM.from_pretrained(lowerCamelCase_ ).eval()
else:
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), F'''teacher must be a model or string got type {type(lowerCamelCase_ )}'''
snake_case_ : Any = teacher.config.to_diff_dict()
try:
snake_case_ , snake_case_ : List[str] = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
snake_case_ : Dict = teacher_e
if d is None:
snake_case_ : List[Any] = teacher_d
init_kwargs.update({"""encoder_layers""": e, """decoder_layers""": d} )
except AttributeError: # T5
if hasattr(teacher.config , """num_encoder_layers""" ):
snake_case_ , snake_case_ : Tuple = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
snake_case_ , snake_case_ : Union[str, Any] = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
snake_case_ : Optional[int] = teacher_e
if d is None:
snake_case_ : List[str] = teacher_d
if hasattr(teacher.config , """num_encoder_layers""" ):
init_kwargs.update({"""num_encoder_layers""": e, """num_decoder_layers""": d} )
else:
init_kwargs.update({"""num_layers""": e, """num_decoder_layers""": d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(lowerCamelCase_ )
# Copy weights
snake_case_ : List[Any] = teacher.config_class(**lowerCamelCase_ )
snake_case_ : Tuple = AutoModelForSeqaSeqLM.from_config(lowerCamelCase_ )
# Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
snake_case_ : Tuple = student.load_state_dict(teacher.state_dict() , strict=lowerCamelCase_ )
assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys.
if copy_first_teacher_layers: # Our copying is done. We just log and save
snake_case_ , snake_case_ : List[str] = list(range(lowerCamelCase_ ) ), list(range(lowerCamelCase_ ) )
logger.info(
F'''Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to'''
F''' {save_path}''' )
student.save_pretrained(lowerCamelCase_ )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
snake_case_ : List[int] = pick_layers_to_copy(lowerCamelCase_ , lowerCamelCase_ )
if d_layers_to_copy is None:
snake_case_ : List[int] = pick_layers_to_copy(lowerCamelCase_ , lowerCamelCase_ )
try:
if hasattr(
lowerCamelCase_ , """prophetnet""" ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , lowerCamelCase_ )
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , lowerCamelCase_ )
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , lowerCamelCase_ )
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , lowerCamelCase_ )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , lowerCamelCase_ )
copy_layers(teacher.decoder.block , student.decoder.block , lowerCamelCase_ )
logger.info(
F'''Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}''' )
snake_case_ : Any = {
"""teacher_type""": teacher.config.model_type,
"""copied_encoder_layers""": e_layers_to_copy,
"""copied_decoder_layers""": d_layers_to_copy,
}
student.save_pretrained(lowerCamelCase_ )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
| 267
| 0
|
from __future__ import annotations
def resistor_parallel(resistors: list[float]) -> float:
    # Req = 1 / (1/R1 + 1/R2 + ... + 1/Rn)
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    # Req = R1 + R2 + ... + Rn
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        index += 1
    return sum_r
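# Quick sanity checks (illustrative): two 2-ohm resistors in parallel give
# 1 / (1/2 + 1/2) = 1.0 ohm, and 2 + 3 ohms in series give 5.0 ohms:
#
#     resistor_parallel([2.0, 2.0])  # 1.0
#     resistor_series([2.0, 3.0])    # 5.0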
if __name__ == "__main__":
import doctest
doctest.testmod()
| 428
|
'''simple docstring'''
import sys
N = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def solution(n: str = N) -> int:
    # Find the greatest product of thirteen adjacent digits in the 1000-digit number N.
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product
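# Scaled-down example of the same sliding-window scan (illustrative): in the
# string "2694" the two-digit windows give 2*6=12, 6*9=54 and 9*4=36, so the
# answer would be 54; solution() does the same over all 13-digit windows of N.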
if __name__ == "__main__":
print(f"""{solution() = }""")
| 489
| 0
|
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    config = LxmertConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = LxmertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 719
|
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    # Validate that the rows and columns of the grid are sorted in decreasing order.
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    left = 0
    right = len(array) - 1
    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0
    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]
        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid
        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    total = 0
    bound = len(grid[0])
    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 675
| 0
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase : Optional[int] = logging.get_logger(__name__)
lowerCamelCase : List[str] = torch.device('cpu')
def lowercase__( ):
snake_case__ : int = 'http://images.cocodataset.org/val2017/000000039769.jpg'
snake_case__ : Tuple = Image.open(requests.get(A , stream=A ).raw )
return im
def lowercase__( A ):
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02] )
def lowercase__( A , A , A ):
snake_case__ : List[Any] = dct.pop(A )
snake_case__ : Any = val
def lowercase__( A ):
snake_case__ : List[str] = []
for k in state_dict.keys():
snake_case__ : List[str] = k
if ".pwconv" in k:
snake_case__ : List[Any] = k_new.replace('.pwconv' , '.point_wise_conv' )
if ".dwconv" in k:
snake_case__ : List[Any] = k_new.replace('.dwconv' , '.depth_wise_conv' )
if ".Proj." in k:
snake_case__ : Any = k_new.replace('.Proj.' , '.proj.' )
if "patch_embed" in k_new:
snake_case__ : int = k_new.replace('patch_embed' , 'swiftformer.patch_embed.patch_embedding' )
if "network" in k_new:
snake_case__ : Dict = k_new.split('.' )
if ls[2].isdigit():
snake_case__ : Optional[Any] = 'swiftformer.encoder.network.' + ls[1] + '.blocks.' + ls[2] + '.' + '.'.join(ls[3:] )
else:
snake_case__ : Optional[int] = k_new.replace('network' , 'swiftformer.encoder.network' )
rename_keys.append((k, k_new) )
return rename_keys
@torch.no_grad()
def lowercase__( A , A , A ):
snake_case__ : int = SwiftFormerConfig()
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
snake_case__ : Union[str, Any] = 1_0_0_0
snake_case__ : Tuple = 'huggingface/label-files'
snake_case__ : Optional[int] = 'imagenet-1k-id2label.json'
snake_case__ : List[Any] = json.load(open(hf_hub_download(A , A , repo_type='dataset' ) , 'r' ) )
snake_case__ : Dict = {int(A ): v for k, v in idalabel.items()}
snake_case__ : Optional[Any] = idalabel
snake_case__ : Tuple = {v: k for k, v in idalabel.items()}
# size of the architecture
if swiftformer_name == "swiftformer_xs":
snake_case__ : Union[str, Any] = [3, 3, 6, 4]
snake_case__ : List[str] = [4_8, 5_6, 1_1_2, 2_2_0]
elif swiftformer_name == "swiftformer_s":
snake_case__ : List[Any] = [3, 3, 9, 6]
snake_case__ : Optional[Any] = [4_8, 6_4, 1_6_8, 2_2_4]
elif swiftformer_name == "swiftformer_l1":
snake_case__ : List[str] = [4, 3, 1_0, 5]
snake_case__ : Optional[Any] = [4_8, 9_6, 1_9_2, 3_8_4]
elif swiftformer_name == "swiftformer_l3":
snake_case__ : str = [4, 4, 1_2, 6]
snake_case__ : List[str] = [6_4, 1_2_8, 3_2_0, 5_1_2]
# load state_dict of original model, remove and rename some keys
if original_ckpt:
if original_ckpt.startswith('https' ):
snake_case__ : Tuple = torch.hub.load_state_dict_from_url(A , map_location='cpu' , check_hash=A )
else:
snake_case__ : List[str] = torch.load(A , map_location='cpu' )
snake_case__ : Any = checkpoint
snake_case__ : List[str] = create_rename_keys(A )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(A , A , A )
# load HuggingFace model
snake_case__ : Optional[int] = SwiftFormerForImageClassification(A ).eval()
hf_model.load_state_dict(A )
# prepare test inputs
snake_case__ : Dict = prepare_img()
snake_case__ : Any = ViTImageProcessor.from_pretrained('preprocessor_config' )
snake_case__ : str = processor(images=A , return_tensors='pt' )
# compare outputs from both models
snake_case__ : Optional[Any] = get_expected_output(A )
snake_case__ : Optional[int] = hf_model(inputs['pixel_values'] ).logits
assert hf_logits.shape == torch.Size([1, 1_0_0_0] )
assert torch.allclose(hf_logits[0, 0:5] , A , atol=1e-3 )
Path(A ).mkdir(exist_ok=A )
print(f'''Saving model {swiftformer_name} to {pytorch_dump_folder_path}''' )
hf_model.save_pretrained(A )
if __name__ == "__main__":
lowerCamelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swiftformer_name',
default='swiftformer_xs',
choices=['swiftformer_xs', 'swiftformer_s', 'swiftformer_l1', 'swiftformer_l3'],
type=str,
help='Name of the SwiftFormer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='./converted_outputs/',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--original_ckpt', default=None, type=str, help='Path to the original model checkpoint.')
lowerCamelCase : Any = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 170
|
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int):
    # A local function to see if a dot lands in the circle.
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The numpy value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")
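# Convergence note (rough, illustrative): the Monte Carlo error shrinks on the
# order of 1/sqrt(iterations), so pi_estimator(10_000) is typically within a
# few hundredths of pi, and larger iteration counts tighten the estimate further.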
def area_under_curve_estimator(iterations: int, function_to_integrate: Callable[[float], float], min_value: float = 0.0, max_value: float = 1.0) -> float:
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(iterations: int, min_value: float = 0.0, max_value: float = 1.0) -> None:
    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0
    )
    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")
if __name__ == "__main__":
import doctest
doctest.testmod()
| 170
| 1
|
'''simple docstring'''
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool('text-classification')
        self.tool.setup()
        self.remote_tool = load_tool('text-classification', remote=True)

    def test_exact_match_arg(self):
        result = self.tool('That\'s quite cool', ['positive', 'negative'])
        self.assertEqual(result, 'positive')

    def test_exact_match_arg_remote(self):
        result = self.remote_tool('That\'s quite cool', ['positive', 'negative'])
        self.assertEqual(result, 'positive')

    def test_exact_match_kwarg(self):
        result = self.tool(text='That\'s quite cool', labels=['positive', 'negative'])
        self.assertEqual(result, 'positive')

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text='That\'s quite cool', labels=['positive', 'negative'])
        self.assertEqual(result, 'positive')
| 707
|
from __future__ import annotations
def all_unique(input_str: str) -> bool:
    # True when every character of the string occurs exactly once.
    return len(set(input_str)) == len(input_str)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 677
| 0
|
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
a : int = logging.get_logger("""transformers.models.speecht5""")
def load_weights(checkpoint, hf_model, config):
    hf_model.apply_weight_norm()
    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]
    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]
    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]
            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]
    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]
    hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint(
    checkpoint_path,
    stats_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()

    model = SpeechT5HifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
    args = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
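
# A hedged example invocation (all paths are hypothetical placeholders, not
# taken from the original script):
#
#   python convert_hifigan.py \
#       --checkpoint_path ./generator.ckpt \
#       --stats_path ./stats.npy \
#       --pytorch_dump_folder_path ./speecht5_hifigan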
| 613
|
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
class LevitModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 6, 8],
        depths=[2, 3, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.initializer_range = initializer_range
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return LevitConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            kernel_size=self.kernel_size,
            stride=self.stride,
            padding=self.padding,
            patch_size=self.patch_size,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            depths=self.depths,
            key_dim=self.key_dim,
            drop_path_rate=self.drop_path_rate,
            mlp_ratio=self.mlp_ratio,
            attention_ratio=self.attention_ratio,
            initializer_range=self.initializer_range,
            down_ops=self.down_ops,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = LevitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for _ in range(4):
            height = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1)
            width = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, ceil(height / 4) * ceil(width / 4), self.hidden_sizes[-1]),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = LevitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class LevitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LevitModel,
            "image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = LevitModelTester(self)
        self.config_tester = LevitConfigTester(self, config_class=LevitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return
    @unittest.skip(reason="Levit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Levit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="Levit does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = len(self.model_tester.depths) + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            image_size = (self.model_tester.image_size, self.model_tester.image_size)
            height, width = image_size[0], image_size[1]
            for _ in range(4):
                height = floor(
                    (
                        (height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1
                )
                width = floor(
                    (
                        (width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1
                )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [
                    height * width,
                    self.model_tester.hidden_sizes[0],
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _a ( self ):
pass
def _a ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False ):
lowerCAmelCase_: List[str] = super()._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ , return_labels=lowerCamelCase__ )
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def _a ( self ):
lowerCAmelCase_: Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def _a ( self ):
lowerCAmelCase_: str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ )
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # LevitForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # LevitForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]

                    model = model_class(config)
                    model.to(torch_device)
                    model.train()

                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)

                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])

                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])

                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom that something is wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}"
                            )

                    loss.backward()
    @slow
    def test_model_from_pretrained(self):
        for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LevitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class LevitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([1.0448, -0.3745, -1.8317]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 613
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_clap""": [
"""CLAP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ClapAudioConfig""",
"""ClapConfig""",
"""ClapTextConfig""",
],
"""processing_clap""": ["""ClapProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clap"] = [
"""CLAP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ClapModel""",
"""ClapPreTrainedModel""",
"""ClapTextModel""",
"""ClapTextModelWithProjection""",
"""ClapAudioModel""",
"""ClapAudioModelWithProjection""",
]
lowercase_ = ["""ClapFeatureExtractor"""]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
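
# A short usage sketch (the checkpoint name "laion/clap-htsat-unfused" is an
# assumed example): the lazy module above means the heavy modeling code is
# only imported when one of these names is first accessed.
#
#   from transformers import ClapModel, ClapProcessor
#   model = ClapModel.from_pretrained("laion/clap-htsat-unfused")
#   processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")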
| 703
|
lowercase_ = "0.18.2"
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
    from .models import (
        AutoencoderKL,
        ControlNetModel,
        ModelMixin,
        PriorTransformer,
        T5FilmDecoder,
        Transformer2DModel,
        UNet1DModel,
        UNet2DConditionModel,
        UNet2DModel,
        UNet3DConditionModel,
        VQModel,
    )
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
        KDPM2AncestralDiscreteScheduler,
        KDPM2DiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
        AltDiffusionImg2ImgPipeline,
        AltDiffusionPipeline,
        AudioLDMPipeline,
        CycleDiffusionPipeline,
        IFImg2ImgPipeline,
        IFImg2ImgSuperResolutionPipeline,
        IFInpaintingPipeline,
        IFInpaintingSuperResolutionPipeline,
        IFPipeline,
        IFSuperResolutionPipeline,
        ImageTextPipelineOutput,
        KandinskyImg2ImgPipeline,
        KandinskyInpaintPipeline,
        KandinskyPipeline,
        KandinskyPriorPipeline,
        KandinskyV22ControlnetImg2ImgPipeline,
        KandinskyV22ControlnetPipeline,
        KandinskyV22Img2ImgPipeline,
        KandinskyV22InpaintPipeline,
        KandinskyV22Pipeline,
        KandinskyV22PriorEmb2EmbPipeline,
        KandinskyV22PriorPipeline,
        LDMTextToImagePipeline,
        PaintByExamplePipeline,
        SemanticStableDiffusionPipeline,
        ShapEImg2ImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
        StableDiffusionDepth2ImgPipeline,
        StableDiffusionDiffEditPipeline,
        StableDiffusionImageVariationPipeline,
        StableDiffusionImg2ImgPipeline,
        StableDiffusionInpaintPipeline,
        StableDiffusionInpaintPipelineLegacy,
        StableDiffusionInstructPix2PixPipeline,
        StableDiffusionLatentUpscalePipeline,
        StableDiffusionLDM3DPipeline,
        StableDiffusionModelEditingPipeline,
        StableDiffusionPanoramaPipeline,
        StableDiffusionParadigmsPipeline,
        StableDiffusionPipeline,
        StableDiffusionPipelineSafe,
        StableDiffusionPix2PixZeroPipeline,
        StableDiffusionSAGPipeline,
        StableDiffusionUpscalePipeline,
        StableUnCLIPImg2ImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
    from .pipelines import StableDiffusionXLImg2ImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
        OnnxStableDiffusionImg2ImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
    from .models.unet_2d_condition_flax import FlaxUNet2DConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
        FlaxStableDiffusionImg2ImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
| 390
| 0
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", question: str):
        return self.pre_processor(image, question, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
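
# A hedged usage sketch (the image file and question below are made-up
# examples, not part of the original module):
#
#   from PIL import Image
#   tool = ImageQuestionAnsweringTool()
#   answer = tool(image=Image.open("photo.png"), question="How many cats are there?")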
| 555
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": ".",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 1.0,
            "sag_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2_non_square(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt],
            width=768,
            height=512,
            generator=generator,
            guidance_scale=7.5,
            sag_scale=1.0,
            num_inference_steps=20,
            output_type="np",
        )

        image = output.images

        assert image.shape == (1, 512, 768, 3)
| 555
| 1
|
"""simple docstring"""
def lowerCAmelCase_ ( lowercase_ : Union[str, Any] , lowercase_ : Any , lowercase_ : Any ):
'''simple docstring'''
if n == 0:
return 1
elif n % 2 == 1:
return (binary_exponentiation(lowercase_ , n - 1 , lowercase_ ) * a) % mod
else:
__SCREAMING_SNAKE_CASE : Union[str, Any] = binary_exponentiation(lowercase_ , n / 2 , lowercase_ )
return (b * b) % mod
# a prime number
_lowerCamelCase = 7_01
_lowerCamelCase = 10_00_00_00_00
_lowerCamelCase = 10
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
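
# A small worked check (values chosen for illustration): 3**5 = 243 and
# 243 % 7 == 5, so the function should agree.
assert binary_exponentiation(3, 5, 7) == 5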
| 401
|
"""simple docstring"""
from __future__ import annotations
def lowerCAmelCase_ ( lowercase_ : int , lowercase_ : int ):
'''simple docstring'''
if b == 0:
return (1, 0)
((__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE)) : Tuple = extended_euclid(lowercase_ , a % b )
__SCREAMING_SNAKE_CASE : int = a // b
return (y, x - k * y)
def lowerCAmelCase_ ( lowercase_ : int , lowercase_ : int , lowercase_ : int , lowercase_ : int ):
'''simple docstring'''
((__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE)) : int = extended_euclid(lowercase_ , lowercase_ )
__SCREAMING_SNAKE_CASE : Any = na * na
__SCREAMING_SNAKE_CASE : str = ra * x * na + ra * y * na
return (n % m + m) % m
def lowerCAmelCase_ ( lowercase_ : int , lowercase_ : int ):
'''simple docstring'''
((__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE)) : str = extended_euclid(lowercase_ , lowercase_ )
if b < 0:
__SCREAMING_SNAKE_CASE : Union[str, Any] = (b % n + n) % n
return b
def lowerCAmelCase_ ( lowercase_ : int , lowercase_ : int , lowercase_ : int , lowercase_ : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[Any] = invert_modulo(lowercase_ , lowercase_ ), invert_modulo(lowercase_ , lowercase_ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = na * na
__SCREAMING_SNAKE_CASE : List[Any] = ra * x * na + ra * y * na
return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name='''chinese_remainder_theorem''', verbose=True)
testmod(name='''chinese_remainder_theorem2''', verbose=True)
testmod(name='''invert_modulo''', verbose=True)
testmod(name='''extended_euclid''', verbose=True)
| 401
| 1
|
from __future__ import annotations
def ceil_index(v: list[int], l: int, r: int, key: int) -> int:  # noqa: E741
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r


def longest_increasing_subsequence_length(v: list[int]) -> int:
    if len(v) == 0:
        return 0

    tail = [0] * len(v)
    length = 1

    tail[0] = v[0]

    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # new smallest value
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the longest subsequence found so far
            tail[length] = v[i]
            length += 1
        else:
            # v[i] replaces the ceiling value inside tail
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]

    return length
if __name__ == "__main__":
import doctest
doctest.testmod()
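    # A quick worked example (input chosen for illustration): the longest
    # strictly increasing subsequence of this list is [2, 3, 7, 8, 10, 13].
    assert longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6]) == 6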
| 469
|
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def try_infer_format_from_ext(path: str):
    if not path:
        return "pipe"

    for ext in PipelineDataFormat.SUPPORTED_FORMATS:
        if path.endswith(ext):
            return ext

    raise Exception(
        f"Unable to determine file format from file extension {path}. "
        f"Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}"
    )
def run_command_factory(args):
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    format = try_infer_format_from_ext(args.input) if args.format == "infer" else args.format
    reader = PipelineDataFormat.from_str(
        format=format,
        output_path=args.output,
        input_path=args.input,
        column=args.column if args.column else nlp.default_input_names,
        overwrite=args.overwrite,
    )
    return RunCommand(nlp, reader)


class RunCommand(BaseTransformersCLICommand):
    def __init__(self, nlp: Pipeline, reader: PipelineDataFormat):
        self._nlp = nlp
        self._reader = reader
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        run_parser = parser.add_parser("run", help="Run a pipeline through the CLI")
        run_parser.add_argument("--task", choices=get_supported_tasks(), help="Task to run")
        run_parser.add_argument("--input", type=str, help="Path to the file to use for inference")
        run_parser.add_argument("--output", type=str, help="Path to the file that will be used post to write results.")
        run_parser.add_argument("--model", type=str, help="Name or path to the model to instantiate.")
        run_parser.add_argument("--config", type=str, help="Name or path to the model's config to instantiate.")
        run_parser.add_argument(
            "--tokenizer", type=str, help="Name of the tokenizer to use. (default: same as the model name)"
        )
        run_parser.add_argument(
            "--column",
            type=str,
            help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)",
        )
        run_parser.add_argument(
            "--format",
            type=str,
            default="infer",
            choices=PipelineDataFormat.SUPPORTED_FORMATS,
            help="Input format to read from",
        )
        run_parser.add_argument(
            "--device",
            type=int,
            default=-1,
            help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
        )
        run_parser.add_argument("--overwrite", action="store_true", help="Allow overwriting the output file.")
        run_parser.set_defaults(func=run_command_factory)
    def run(self):
        nlp, outputs = self._nlp, []

        for entry in self._reader:
            output = nlp(**entry) if self._reader.is_multi_columns else nlp(entry)
            if isinstance(output, dict):
                outputs.append(output)
            else:
                outputs += output

        # Saving data
        if self._nlp.binary_output:
            binary_path = self._reader.save_binary(outputs)
            logger.warning(f"Current pipeline requires output to be in binary format, saving at {binary_path}")
        else:
            self._reader.save(outputs)
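
# A hedged CLI sketch (file names are hypothetical): with the subcommand
# registered above, a CSV column can be piped through a pipeline like so.
#
#   transformers-cli run --task text-classification \
#       --input reviews.csv --column text --format csv --output predictions.json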
| 469
| 1
|
def solution(pence: int = 200) -> int:
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence

    for coin in coins:
        for i in range(coin, pence + 1, 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]


if __name__ == "__main__":
    assert solution(200) == 73682
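    # A small worked check (pence value chosen for illustration): 5 pence can
    # be formed 4 ways from the coins above (5, 2+2+1, 2+1+1+1, 1+1+1+1+1).
    assert solution(5) == 4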
| 717
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class AlignProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return EfficientNetImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = AlignProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = AlignProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = AlignProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = AlignProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, EfficientNetImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, EfficientNetImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = AlignProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = AlignProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, EfficientNetImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, padding="max_length", max_length=64)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 564
| 0
|
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
    OnnxStableDiffusionImg2ImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionImg2ImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 594
|
"""simple docstring"""
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class DebertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast
def snake_case_ ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'[UNK]',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "[UNK]"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_token_type_ids(self):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer("Hello", "World")
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["token_type_ids"], expected_token_type_ids)
@slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/deberta-base")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False)
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
@slow
    def test_tokenizer_integration(self):
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class)
        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained("microsoft/deberta-base")
            sequences = [
'ALBERT: A Lite BERT for Self-supervised Learning of Language Representations',
'ALBERT incorporates two parameter reduction techniques',
'The first one is a factorized embedding parameterization. By decomposing the large vocabulary'
' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'
' vocabulary embedding.',
]
            encoding = tokenizer(sequences, padding=True)
            decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding["input_ids"]]
# fmt: off
            expected_encoding = {
'input_ids': [
[1, 2118, 11126, 565, 35, 83, 25191, 163, 18854, 13, 12156, 12, 16101, 25376, 13807, 9, 22205, 27893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2118, 11126, 565, 24536, 80, 43797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3724, 1538, 33183, 11303, 43797, 1938, 4, 870, 24165, 29105, 5, 739, 32644, 33183, 11303, 36173, 88, 80, 650, 7821, 45940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 13171, 31, 5, 1836, 9, 32644, 33183, 11303, 4, 2]
],
'token_type_ids': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
            expected_decoded_sequence = [
'ALBERT: A Lite BERT for Self-supervised Learning of Language Representations',
'ALBERT incorporates two parameter reduction techniques',
'The first one is a factorized embedding parameterization. By decomposing the large vocabulary'
' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'
' vocabulary embedding.',
]
            self.assertDictEqual(encoding.data, expected_encoding)
            for expected, decoded in zip(expected_decoded_sequence, decoded_sequences):
                self.assertEqual(expected, decoded)
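# Minimal round-trip sketch with the pretrained checkpoint used by the slow tests
# above; printed ids are illustrative, not pinned values.
if __name__ == "__main__":
    from transformers import DebertaTokenizer

    tokenizer = DebertaTokenizer.from_pretrained("microsoft/deberta-base")
    encoded = tokenizer("lower newer", "multi-sequence build")
    print(encoded["input_ids"])
    print(encoded["token_type_ids"])  # segment ids, as asserted in test_token_type_ids
    print(tokenizer.decode(encoded["input_ids"], skip_special_tokens=True))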
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
class BertGenerationConfig(PretrainedConfig):
    model_type = "bert-generation"

    def __init__(self, vocab_size=50358, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16,
                 intermediate_size=4096, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02,
                 layer_norm_eps=1e-12, pad_token_id=0, bos_token_id=2, eos_token_id=1,
                 position_embedding_type="absolute", use_cache=True, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
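# Quick sketch of the config in use: override a few defaults and round-trip the
# result through the dict serialization inherited from PretrainedConfig.
if __name__ == "__main__":
    config = BertGenerationConfig(hidden_size=128, num_hidden_layers=2, num_attention_heads=4)
    print(config.model_type)  # "bert-generation"
    restored = BertGenerationConfig.from_dict(config.to_dict())
    assert restored.hidden_size == 128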
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_electra': ['ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ElectraConfig', 'ElectraOnnxConfig'],
'tokenization_electra': ['ElectraTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_electra"] = [
'ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'ElectraForCausalLM',
'ElectraForMaskedLM',
'ElectraForMultipleChoice',
'ElectraForPreTraining',
'ElectraForQuestionAnswering',
'ElectraForSequenceClassification',
'ElectraForTokenClassification',
'ElectraModel',
'ElectraPreTrainedModel',
'load_tf_weights_in_electra',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_electra"] = [
'TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFElectraForMaskedLM',
'TFElectraForMultipleChoice',
'TFElectraForPreTraining',
'TFElectraForQuestionAnswering',
'TFElectraForSequenceClassification',
'TFElectraForTokenClassification',
'TFElectraModel',
'TFElectraPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_electra"] = [
'FlaxElectraForCausalLM',
'FlaxElectraForMaskedLM',
'FlaxElectraForMultipleChoice',
'FlaxElectraForPreTraining',
'FlaxElectraForQuestionAnswering',
'FlaxElectraForSequenceClassification',
'FlaxElectraForTokenClassification',
'FlaxElectraModel',
'FlaxElectraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
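# Sketch of what the lazy-module indirection buys (assuming this file is installed
# as `transformers.models.electra`): importing the package stays cheap, and the
# heavy backend import only happens on first attribute access.
if __name__ == "__main__":
    import importlib

    electra = importlib.import_module("transformers.models.electra")
    print(type(electra))          # the _LazyModule proxy
    print(electra.ElectraConfig)  # first access triggers the real submodule import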
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)
    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            upper[i][j] = table[i][j] - total
    return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
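# Worked example: decompose a 3x3 matrix and check that lower @ upper rebuilds it.
if __name__ == "__main__":
    matrix = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
    lower, upper = lower_upper_decomposition(matrix)
    assert np.allclose(lower @ upper, matrix)
    print(lower)
    print(upper)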
'''simple docstring'''
import re
from filelock import FileLock
try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock"):
        nltk.download("punkt", quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class RegressionDataset:
    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return {"x": self.x[i], "y": self.y[i]}
class RegressionModel4XPU(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a[0] + self.b[0]
class RegressionModel(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a + self.b
def get_dataloaders(accelerator, batch_size: int = 16):
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv", data_files=data_files)
    label_list = datasets["train"].unique("label")
    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length"
        )
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["sentence1", "sentence2", "label"],
    )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)
    return train_dataloader, eval_dataloader
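# Sketch of how the pieces above fit together: one SGD step on the synthetic
# regression task. Hyperparameters are arbitrary.
if __name__ == "__main__":
    dataset = RegressionDataset(length=128, seed=42)
    loader = DataLoader(dataset, batch_size=16, shuffle=True)
    model = RegressionModel()
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    batch = next(iter(loader))
    loss = torch.nn.functional.mse_loss(model(batch["x"]), batch["y"])
    loss.backward()
    optimizer.step()
    print(f"one-step loss: {loss.item():.4f}")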
'''simple docstring'''
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
"""config""": [
"""EXTERNAL_DATA_FORMAT_SIZE_LIMIT""",
"""OnnxConfig""",
"""OnnxConfigWithPast""",
"""OnnxSeq2SeqConfigWithPast""",
"""PatchingSpec""",
],
"""convert""": ["""export""", """validate_model_outputs"""],
"""features""": ["""FeaturesManager"""],
"""utils""": ["""ParameterFormat""", """compute_serialized_parameters_size"""],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class RegNetModelTester:
    def __init__(self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10,
                 hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1], is_training=True, use_labels=True,
                 hidden_act="relu", num_labels=3, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels, embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = RegNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = RegNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class RegNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": RegNetModel, "image-classification": RegNetForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = RegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return
    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 2, self.model_tester.image_size // 2],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = RegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class RegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.4180, -1.5051, -3.4836]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
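# Standalone inference sketch mirroring the integration test; the checkpoint name
# and image URL are assumptions for illustration.
if __name__ == "__main__":
    import requests

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
    model = RegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
    with torch.no_grad():
        logits = model(**processor(images=image, return_tensors="pt")).logits
    print(model.config.id2label[logits.argmax(-1).item()])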
'''simple docstring'''
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size(features: Features) -> Optional[int]:
    batch_size = np.inf

    def set_batch_size(feature: FeatureType) -> None:
        nonlocal batch_size
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)

    _visit(features, set_batch_size)
    return None if batch_size is np.inf else batch_size
class ParquetDatasetReader(AbstractDatasetReader):
    def __init__(self, path_or_paths, split=None, features=None, cache_dir=None, keep_in_memory=False,
                 streaming=False, num_proc=None, **kwargs):
        super().__init__(path_or_paths, split=split, features=features, cache_dir=cache_dir,
                         keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs)
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES["parquet"][1]
        self.builder = Parquet(cache_dir=cache_dir, data_files=path_or_paths, features=features, hash=hash, **kwargs)

    def read(self):
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode,
                verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
class ParquetDatasetWriter:
    def __init__(self, dataset: Dataset, path_or_buf, batch_size=None, **parquet_writer_kwargs):
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size or get_writer_batch_size(dataset.features)
        self.parquet_writer_kwargs = parquet_writer_kwargs

    def write(self) -> int:
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with open(self.path_or_buf, "wb+") as buffer:
                written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs)
        else:
            written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs)
        return written

    def _write(self, file_obj: BinaryIO, batch_size: int, **parquet_writer_kwargs) -> int:
        """Writes the dataset as Parquet to a binary file handle; the caller opens and closes the handle."""
        written = 0
        _ = parquet_writer_kwargs.pop("path_or_buf", None)
        schema = self.dataset.features.arrow_schema
        writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs)
        for offset in logging.tqdm(
            range(0, len(self.dataset), batch_size), unit="ba",
            disable=not logging.is_progress_bar_enabled(), desc="Creating parquet from Arrow format",
        ):
            batch = query_table(
                table=self.dataset._data,
                key=slice(offset, offset + batch_size),
                indices=self.dataset._indices if self.dataset._indices is not None else None,
            )
            writer.write_table(batch)
            written += batch.nbytes
        writer.close()
        return written
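# Round-trip sketch for the reader/writer pair above; the file name and columns
# are illustrative.
if __name__ == "__main__":
    ds = Dataset.from_dict({"text": ["a", "b", "c"], "label": [0, 1, 0]})
    num_bytes = ParquetDatasetWriter(ds, "tmp.parquet").write()
    print(f"wrote {num_bytes} bytes")
    reloaded = ParquetDatasetReader("tmp.parquet").read()
    print(reloaded)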
"""simple docstring"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")
mock_tokenizer_config = {"target_lang": "fi", "source_lang": "en"}
zh_code = ">>zh<<"
ORG_NAME = "Helsinki-NLP/"

if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
@require_sentencepiece
class MarianTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab"])
        save_json(mock_tokenizer_config, save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"])
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["source_spm"])
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["target_spm"])
        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
    def get_tokenizer(self, **kwargs) -> MarianTokenizer:
        return MarianTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )

    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 9)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 9)
    def test_tokenizer_equivalence_en_de(self):
        en_de_tokenizer = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de")
        batch = en_de_tokenizer(["I am a small frog"], return_tensors=None)
        self.assertIsInstance(batch, BatchEncoding)
        expected = [38, 121, 14, 697, 38848, 0]
        self.assertListEqual(expected, batch.input_ids[0])
        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir)
        contents = [x.name for x in Path(save_dir).glob("*")]
        self.assertIn("source.spm", contents)
        MarianTokenizer.from_pretrained(save_dir)

    def test_outputs_not_longer_than_maxlen(self):
        tok = self.get_tokenizer()
        batch = tok(
            ["I am a small frog" * 1000, "I am a small frog"], padding=True, truncation=True, return_tensors=FRAMEWORK
        )
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual(batch.input_ids.shape, (2, 512))

    def test_outputs_can_be_shorter(self):
        tok = self.get_tokenizer()
        batch_smaller = tok(["I am a tiny frog", "I am a small frog"], padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch_smaller, BatchEncoding)
        self.assertEqual(batch_smaller.input_ids.shape, (2, 10))
@slow
    def test_tokenizer_integration(self):
# fmt: off
lowerCamelCase__ = {"""input_ids""": [[4_3495, 462, 20, 4_2164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 3_8999, 6, 8, 464, 132, 1703, 492, 13, 4669, 3_7867, 13, 7525, 27, 1593, 988, 13, 3_3972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 1_2338, 2, 1_3958, 387, 2, 3629, 6953, 188, 2900, 2, 1_3958, 8011, 1_1501, 23, 8460, 4073, 3_4009, 20, 435, 1_1439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 3_7867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 2_6453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 1_0767, 6, 316, 304, 4239, 3, 0], [148, 1_5722, 19, 1839, 12, 1350, 13, 2_2327, 5082, 5418, 4_7567, 3_5938, 59, 318, 1_9552, 108, 2183, 54, 1_4976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 1_9088, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100], [36, 6395, 1_2570, 3_9147, 1_1597, 6, 266, 4, 4_5405, 7296, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=lowerCamelCase__,
            model_name="Helsinki-NLP/opus-mt-en-de",
            revision="1a8c2263da11e68e50938f97e10cd57820bd504c",
            decode_kwargs={"use_source_tokenizer": True},
        )
    def test_tokenizer_integration_seperate_vocabs(self):
        tokenizer = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs")
        source_text = "Tämä on testi"
        target_text = "This is a test"
        expected_src_ids = [76, 7, 2047, 2]
        expected_target_ids = [69, 12, 11, 940, 2]
        src_ids = tokenizer(source_text).input_ids
        self.assertListEqual(src_ids, expected_src_ids)
        target_ids = tokenizer(text_target=target_text).input_ids
        self.assertListEqual(target_ids, expected_target_ids)
        decoded = tokenizer.decode(target_ids, skip_special_tokens=True)
        self.assertEqual(decoded, target_text)
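# End-to-end sketch: the tokenizer paired with its MarianMT model counterpart for
# an actual translation; output text can vary across model revisions.
if __name__ == "__main__":
    from transformers import MarianMTModel

    tokenizer = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
    model = MarianMTModel.from_pretrained("Helsinki-NLP/opus-mt-en-de")
    batch = tokenizer(["I am a small frog"], return_tensors="pt", padding=True)
    generated = model.generate(**batch)
    print(tokenizer.batch_decode(generated, skip_special_tokens=True))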
"""simple docstring"""
import math
from numpy import inf
from scipy.integrate import quad
def gamma(num: float) -> float:
    """Compute the Gamma function for num > 0 via numerical integration."""
    if num <= 0:
        raise ValueError("math domain error")
    return quad(integrand, 0, inf, args=(num))[0]


def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)
if __name__ == "__main__":
from doctest import testmod
testmod()
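# Worked check: for positive integers Gamma(n) == (n - 1)!, so gamma(5) should be
# close to 24 up to quadrature error; Gamma(1/2)**2 recovers pi.
if __name__ == "__main__":
    assert abs(gamma(5) - 24.0) < 1e-4
    print(gamma(0.5) ** 2)  # ~3.14159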
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_tfa_weights_in_bert(model, tf_checkpoint_path, config):
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    layer_depth = []
    for full_name, shape in init_vars:
        # logger.info(f"Loading TF weight {name} with shape {shape}")
        name = full_name.split("/")
        if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
            logger.info(f"Skipping non-model layer {full_name}")
            continue
        if "optimizer" in full_name:
            logger.info(f"Skipping optimization layer {full_name}")
            continue
        if name[0] == "model":
            # ignore initial 'model'
            name = name[1:]
        # figure out how many levels deep the name is
        depth = 0
        for _name in name:
            if _name.startswith("layer_with_weights"):
                depth += 1
            else:
                break
        layer_depth.append(depth)
        # read data
        array = tf.train.load_variable(tf_path, full_name)
        names.append("/".join(name))
        arrays.append(array)
    logger.info(f"Read a total of {len(names):,} layers")
# Sanity check
    if len(set(layer_depth)) != 1:
        raise ValueError(f"Found layer names with different depths (layer depth {list(set(layer_depth))})")
    layer_depth = list(set(layer_depth))[0]
if layer_depth != 1:
raise ValueError(
'''The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP'''
''' heads.''' )
# convert layers
logger.info('''Converting weights...''' )
    for full_name, array in zip(names, arrays):
        name = full_name.split("/")
        pointer = model
        trace = []
        for i, m_name in enumerate(name):
if m_name == ".ATTRIBUTES":
# variable names end with .ATTRIBUTES/VARIABLE_VALUE
break
if m_name.startswith('''layer_with_weights''' ):
UpperCamelCase__ : Optional[int] = int(m_name.split('''-''' )[-1] )
if layer_num <= 2:
# embedding layers
# layer_num 0: word_embeddings
# layer_num 1: position_embeddings
# layer_num 2: token_type_embeddings
continue
elif layer_num == 3:
# embedding LayerNorm
trace.extend(['''embeddings''', '''LayerNorm'''] )
UpperCamelCase__ : Any = getattr(__lowercase , '''embeddings''' )
UpperCamelCase__ : str = getattr(__lowercase , '''LayerNorm''' )
elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
# encoder layers
trace.extend(['''encoder''', '''layer''', str(layer_num - 4 )] )
UpperCamelCase__ : Union[str, Any] = getattr(__lowercase , '''encoder''' )
UpperCamelCase__ : Any = getattr(__lowercase , '''layer''' )
UpperCamelCase__ : Union[str, Any] = pointer[layer_num - 4]
elif layer_num == config.num_hidden_layers + 4:
# pooler layer
trace.extend(['''pooler''', '''dense'''] )
UpperCamelCase__ : int = getattr(__lowercase , '''pooler''' )
UpperCamelCase__ : Any = getattr(__lowercase , '''dense''' )
elif m_name == "embeddings":
trace.append('''embeddings''' )
UpperCamelCase__ : List[Any] = getattr(__lowercase , '''embeddings''' )
if layer_num == 0:
trace.append('''word_embeddings''' )
UpperCamelCase__ : Optional[int] = getattr(__lowercase , '''word_embeddings''' )
elif layer_num == 1:
trace.append('''position_embeddings''' )
UpperCamelCase__ : Optional[int] = getattr(__lowercase , '''position_embeddings''' )
elif layer_num == 2:
trace.append('''token_type_embeddings''' )
UpperCamelCase__ : Union[str, Any] = getattr(__lowercase , '''token_type_embeddings''' )
else:
raise ValueError(f'''Unknown embedding layer with name {full_name}''' )
trace.append('''weight''' )
UpperCamelCase__ : Optional[Any] = getattr(__lowercase , '''weight''' )
elif m_name == "_attention_layer":
# self-attention layer
trace.extend(['''attention''', '''self'''] )
UpperCamelCase__ : Optional[int] = getattr(__lowercase , '''attention''' )
UpperCamelCase__ : Optional[Any] = getattr(__lowercase , '''self''' )
elif m_name == "_attention_layer_norm":
# output attention norm
trace.extend(['''attention''', '''output''', '''LayerNorm'''] )
UpperCamelCase__ : Union[str, Any] = getattr(__lowercase , '''attention''' )
UpperCamelCase__ : Optional[int] = getattr(__lowercase , '''output''' )
UpperCamelCase__ : Union[str, Any] = getattr(__lowercase , '''LayerNorm''' )
elif m_name == "_attention_output_dense":
# output attention dense
trace.extend(['''attention''', '''output''', '''dense'''] )
UpperCamelCase__ : List[str] = getattr(__lowercase , '''attention''' )
UpperCamelCase__ : Tuple = getattr(__lowercase , '''output''' )
UpperCamelCase__ : List[Any] = getattr(__lowercase , '''dense''' )
elif m_name == "_output_dense":
# output dense
trace.extend(['''output''', '''dense'''] )
UpperCamelCase__ : List[Any] = getattr(__lowercase , '''output''' )
UpperCamelCase__ : List[str] = getattr(__lowercase , '''dense''' )
elif m_name == "_output_layer_norm":
# output dense
trace.extend(['''output''', '''LayerNorm'''] )
UpperCamelCase__ : List[str] = getattr(__lowercase , '''output''' )
UpperCamelCase__ : Any = getattr(__lowercase , '''LayerNorm''' )
elif m_name == "_key_dense":
# attention key
trace.append('''key''' )
UpperCamelCase__ : List[Any] = getattr(__lowercase , '''key''' )
elif m_name == "_query_dense":
# attention query
trace.append('''query''' )
UpperCamelCase__ : List[Any] = getattr(__lowercase , '''query''' )
elif m_name == "_value_dense":
# attention value
trace.append('''value''' )
UpperCamelCase__ : Any = getattr(__lowercase , '''value''' )
elif m_name == "_intermediate_dense":
# attention intermediate dense
trace.extend(['''intermediate''', '''dense'''] )
UpperCamelCase__ : List[Any] = getattr(__lowercase , '''intermediate''' )
UpperCamelCase__ : Union[str, Any] = getattr(__lowercase , '''dense''' )
elif m_name == "_output_layer_norm":
# output layer norm
trace.append('''output''' )
UpperCamelCase__ : List[str] = getattr(__lowercase , '''output''' )
# weights & biases
elif m_name in ["bias", "beta"]:
trace.append('''bias''' )
UpperCamelCase__ : List[Any] = getattr(__lowercase , '''bias''' )
elif m_name in ["kernel", "gamma"]:
trace.append('''weight''' )
UpperCamelCase__ : Any = getattr(__lowercase , '''weight''' )
else:
logger.warning(f'''Ignored {m_name}''' )
        # for certain layers reshape is necessary
        trace = ".".join(trace)
        if re.match(r"(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)", trace) or re.match(
            r"(\S+)\.attention\.output\.dense\.weight", trace
        ):
            array = array.reshape(pointer.data.shape)
        if "kernel" in full_name:
            array = array.transpose()
        if pointer.shape == array.shape:
            pointer.data = torch.from_numpy(array)
        else:
            raise ValueError(
                f"Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:"
                f" {array.shape}"
            )
        logger.info(f"Successfully set variable {full_name} to PyTorch layer {trace}")
return model
def convert_tfa_checkpoint_to_pytorch(tf_checkpoint_path, config_path, pytorch_dump_path):
    # Instantiate model
    logger.info(f"Loading model based on config from {config_path}...")
    config = BertConfig.from_json_file(config_path)
    model = BertModel(config)
    # Load weights from checkpoint
    logger.info(f"Loading weights from checkpoint {tf_checkpoint_path}...")
    load_tfa_weights_in_bert(model, tf_checkpoint_path, config)
    # Save pytorch-model
    logger.info(f"Saving PyTorch model to {pytorch_dump_path}...")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow 2.x checkpoint path."
)
parser.add_argument(
"--bert_config_file",
type=str,
required=True,
help="The config json file corresponding to the BERT model. This specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path",
type=str,
required=True,
help="Path to the output PyTorch model (must include filename).",
)
    args = parser.parse_args()
convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
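# Usage sketch: the script is normally driven from the CLI (the script filename
# and paths below are illustrative):
#
#   python convert_bert_original_tf2_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./tf2_ckpt/ckpt-1 \
#       --bert_config_file ./bert_config.json \
#       --pytorch_dump_path ./pytorch_model.bin
#
# The same conversion can be invoked programmatically:
#   convert_tfa_checkpoint_to_pytorch("./tf2_ckpt/ckpt-1", "./bert_config.json", "./pytorch_model.bin")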
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "ResNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "microsoft/resnet-50"
_EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "microsoft/resnet-50"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat"

RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "microsoft/resnet-50",
    # See all resnet models at https://huggingface.co/models?filter=resnet
]
class ResNetConvLayer(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, activation: str = "relu"):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetEmbeddings(nn.Module):
    """ResNet embeddings (stem), composed of a single aggressive convolution."""

    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act
        )
        self.pooler = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.num_channels = config.num_channels

    def forward(self, pixel_values: Tensor) -> Tensor:
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        embedding = self.embedder(pixel_values)
        embedding = self.pooler(embedding)
        return embedding
class ResNetShortCut(nn.Module):
    """ResNet shortcut: projects the residual features to the correct size and downsamples if needed."""

    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class ResNetBasicLayer(nn.Module):
    """A classic ResNet residual layer composed of two 3x3 convolutions."""

    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu"):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, out_channels, stride=stride),
            ResNetConvLayer(out_channels, out_channels, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetBottleNeckLayer(nn.Module):
    """A classic ResNet bottleneck layer: 1x1 reduce, 3x3, then 1x1 expand."""

    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu", reduction: int = 4):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, reduces_channels, kernel_size=1),
            ResNetConvLayer(reduces_channels, reduces_channels, stride=stride),
            ResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetStage(nn.Module):
    """A ResNet stage composed of stacked layers."""

    def __init__(self, config: ResNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2):
        super().__init__()
        layer = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels, out_channels, stride=stride, activation=config.hidden_act),
            *[layer(out_channels, out_channels, activation=config.hidden_act) for _ in range(depth - 1)],
        )

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class ResNetEncoder(nn.Module):
    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth))

    def forward(self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True):
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state,
            hidden_states=hidden_states,
        )
class ResNetPreTrainedModel(PreTrainedModel):
    config_class = ResNetConfig
    base_model_prefix = "resnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, ResNetEncoder):
            module.gradient_checkpointing = value
RESNET_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

RESNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.

        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare ResNet model outputting raw features without any specific head on top.",
    RESNET_START_DOCSTRING,
)
class ResNetModel(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )

        last_hidden_state = encoder_outputs[0]

        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    """
    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetForImageClassification(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
@add_start_docstrings(
    """
    ResNet backbone, to be used with frameworks like DETR and MaskFormer.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetBackbone(ResNetPreTrainedModel, BackboneMixin):
    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)

        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)

        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BackboneOutput:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        embedding_output = self.embedder(pixel_values)

        outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)

        hidden_states = outputs.hidden_states

        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output

        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=None,
        )
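
# Hedged usage sketch (added for illustration): end-to-end image classification with
# the classes above. The "microsoft/resnet-50" checkpoint id and the local image path
# are assumptions; weights are downloaded on first use, hence the __main__ guard.
if __name__ == "__main__":
    from PIL import Image

    from transformers import AutoImageProcessor

    processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
    model = ResNetForImageClassification.from_pretrained("microsoft/resnet-50")

    image = Image.open("example.jpg").convert("RGB")
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    print(model.config.id2label[logits.argmax(-1).item()])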
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( __UpperCAmelCase ) -> bool:
return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
if __name__ == "__main__":
print('Program to check whether a number is a Perfect number or not...')
_A = int(input('Enter number: ').strip())
print(F'{number} is {"" if perfect(number) else "not "}a Perfect Number.')
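    # Hedged quick self-checks (added for illustration): 6, 28 and 496 are the first
    # three perfect numbers, while 12 is abundant rather than perfect.
    assert perfect(6) and perfect(28) and perfect(496)
    assert not perfect(12)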
"""simple docstring"""
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        embedding_size=16,
        hidden_size=36,
        num_hidden_layers=6,
        num_hidden_groups=6,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            num_hidden_groups=self.num_hidden_groups,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            sentence_order_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": AlbertModel,
            "fill-mask": AlbertForMaskedLM,
            "question-answering": AlbertForQuestionAnswering,
            "text-classification": AlbertForSequenceClassification,
            "token-classification": AlbertForTokenClassification,
            "zero-shot": AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    # special case for ForPreTraining model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict
    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )

        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
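
# Hedged usage note (added for illustration): these tests are normally collected by
# pytest from the repository root; the exact file path is an assumption based on the
# usual transformers layout.
#   python -m pytest tests/models/albert/test_modeling_albert.py -q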
from operator import delitem, getitem, setitem

import pytest

from data_structures.hashing.hash_map import HashMap


def _get(k):
    return getitem, k


def _set(k, v):
    return setitem, k, v


def _del(k):
    return delitem, k


def _run_operation(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e
_add_items = (
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
)

_overwrite_items = [
    _set("key_a", "val_a"),
    _set("key_a", "val_b"),
]

_delete_items = [
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
    _del("key_a"),
    _del("key_b"),
    _set("key_a", "val_a"),
    _del("key_a"),
]

_access_absent_items = [
    _get("key_a"),
    _del("key_a"),
    _set("key_a", "val_a"),
    _del("key_a"),
    _del("key_a"),
    _get("key_a"),
]

_add_with_resize_up = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]

_add_with_resize_down = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set("key_a", "val_b"),
]
@pytest.mark.parametrize(
    "operations",
    (
        pytest.param(_add_items, id="add items"),
        pytest.param(_overwrite_items, id="overwrite items"),
        pytest.param(_delete_items, id="delete items"),
        pytest.param(_access_absent_items, id="access absent items"),
        pytest.param(_add_with_resize_up, id="add with resize up"),
        pytest.param(_add_with_resize_down, id="add with resize down"),
    ),
)
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())


def test_no_new_methods_was_added_to_api():
    def is_public(name: str) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}

    assert dict_public_names > hash_public_names
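
# Hedged usage sketch (added for illustration): the tests above treat HashMap as a
# drop-in replacement for dict, so day-to-day usage mirrors the built-in mapping API;
# initial_block_size is the only non-dict constructor detail the tests rely on.
if __name__ == "__main__":
    hash_map = HashMap(initial_block_size=4)
    hash_map["key_a"] = "val_a"  # __setitem__, exercised via _set above
    print(hash_map["key_a"])     # __getitem__, exercised via _get above
    del hash_map["key_a"]        # __delitem__, exercised via _del above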
import argparse
import math
import traceback

import dateutil.parser as date_parser
import requests


def extract_time_from_single_job(job):
    """Extract start/end timestamps and duration (in minutes) from a single job."""
    job_info = {}

    start = job["started_at"]
    end = job["completed_at"]

    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)

    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)

    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min

    return job_info
def get_job_time(workflow_run_id, token=None):
    """Extract time info for all jobs in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}

    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})

        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
lowercase__ = parser.parse_args()
lowercase__ = get_job_time(args.workflow_run_id)
lowercase__ = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(f"""{k}: {v['duration']}""")
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNet2DModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0
class LDMSuperResolutionPipeline(DiffusionPipeline):
    r"""
    A pipeline for image super-resolution using latent diffusion.
    """

    def __init__(
        self,
        vqvae: VQModel,
        unet: UNet2DModel,
        scheduler: Union[
            DDIMScheduler,
            PNDMScheduler,
            LMSDiscreteScheduler,
            EulerDiscreteScheduler,
            EulerAncestralDiscreteScheduler,
            DPMSolverMultistepScheduler,
        ],
    ):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image] = None,
        batch_size: Optional[int] = 1,
        num_inference_steps: Optional[int] = 100,
        eta: Optional[float] = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[Tuple, ImagePipelineOutput]:
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}")

        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)

        height, width = image.shape[-2:]

        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype

        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)

        image = image.to(device=self.device, dtype=latents_dtype)

        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
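
# Hedged usage sketch (added for illustration): 4x upscaling of a 128x128 image. The
# "CompVis/ldm-super-resolution-4x-openimages" checkpoint id and the file names are
# assumptions; weights download on first use, hence the __main__ guard.
if __name__ == "__main__":
    from PIL import Image

    pipeline = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
    low_res = Image.open("low_res.png").convert("RGB").resize((128, 128))
    upscaled = pipeline(image=low_res, num_inference_steps=100, eta=1.0).images[0]
    upscaled.save("upscaled.png")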
def alternative_string_arrange(first_str: str, second_str: str) -> str:
    """
    Interleave the characters of two strings; leftover characters of the longer
    string are appended at the end.
    """
    first_str_length = len(first_str)
    second_str_length = len(second_str)
    abs_length = first_str_length if first_str_length > second_str_length else second_str_length
    output_list = []
    for char_count in range(abs_length):
        if char_count < first_str_length:
            output_list.append(first_str[char_count])
        if char_count < second_str_length:
            output_list.append(second_str[char_count])
    return "".join(output_list)


if __name__ == "__main__":
    print(alternative_string_arrange("AB", "XYZ"), end=" ")
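    # Illustrative expected outputs (added for clarity):
    #   alternative_string_arrange("AB", "XYZ") -> "AXBYZ"
    #   alternative_string_arrange("ABC", "")   -> "ABC"
    assert alternative_string_arrange("AB", "XYZ") == "AXBYZ"
    assert alternative_string_arrange("ABC", "") == "ABC"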
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
__snake_case : Tuple = 'pt'
elif is_tf_available():
__snake_case : str = 'tf'
else:
__snake_case : str = 'jax'
class UpperCamelCase ( a , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : str =PerceiverTokenizer
_lowerCamelCase : Optional[int] =False
    def setUp(self):
        super().setUp()
        tokenizer = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def perceiver_tokenizer(self):
        return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")

    def get_tokenizer(self, **kwargs) -> PerceiverTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for Perceiver because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def test_multibytes_char(self):
        tokenizer = self.perceiver_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]Unicode €.[SEP]")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]e è é ê ë[SEP]")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "[CLS]e è é ê ë[SEP]")
    def test_prepare_batch_integration(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 38), batch.input_ids.shape)
        self.assertEqual((2, 38), batch.attention_mask.shape)

    def test_empty_target_text(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)
    def test_max_length_integration(self):
        tokenizer = self.perceiver_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir,
                    additional_special_tokens=new_added_tokens,
                )

                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )
    def test_decode_invalid_byte_id(self):
        tokenizer = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([178]), "�")

    # tokenizer can be instantiated without any pretrained files, so no need for pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on whole input string and not just on single characters
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass

    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
        # strings and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
                string = tokenizer.convert_tokens_to_string(tokens)

                self.assertIsInstance(string, str)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
__snake_case : Optional[int] = {'configuration_vit': ['VIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTConfig', 'ViTOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Dict = ['ViTFeatureExtractor']
__snake_case : Any = ['ViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Optional[int] = [
'VIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTForImageClassification',
'ViTForMaskedImageModeling',
'ViTModel',
'ViTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Any = [
'TFViTForImageClassification',
'TFViTModel',
'TFViTPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : int = [
'FlaxViTForImageClassification',
'FlaxViTModel',
'FlaxViTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
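
# Hedged illustration (added; not part of the original file): with the _LazyModule
# registered above, `import transformers.models.vit` stays cheap, and a line such as
#   from transformers.models.vit import ViTModel
# only triggers the real import of `modeling_vit` (and hence torch) on first access.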
import gc
import unittest

import numpy as np
import torch
from torch.backends.cuda import sdp_kernel

from diffusers import (
    CMStochasticIterativeScheduler,
    ConsistencyModelPipeline,
    UNet2DModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_2, require_torch_gpu

from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class ConsistencyModelPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS

    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )
    @property
    def dummy_uncond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet",
        )
        return unet

    @property
    def dummy_cond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet_class_cond",
        )
        return unet

    def get_dummy_components(self, class_cond=False):
        if class_cond:
            unet = self.dummy_cond_unet
        else:
            unet = self.dummy_uncond_unet

        # Default to CM multistep sampler
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "batch_size": 1,
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "generator": generator,
            "output_type": "np",
        }

        return inputs
    def test_consistency_model_pipeline_multistep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_multistep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@slow
@require_torch_gpu
class ConsistencyModelPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    # NOTE: the default dtype below was garbled in the source ("floataa"); float16 is
    # an assumption consistent with the half-precision flash-attention tests further down.
    def get_inputs(self, seed=0, get_fixed_latents=False, device="cpu", dtype=torch.float16, shape=(1, 3, 64, 64)):
        generator = torch.manual_seed(seed)

        inputs = {
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "class_labels": 0,
            "generator": generator,
            "output_type": "np",
        }

        if get_fixed_latents:
            latents = self.get_fixed_latents(seed=seed, device=device, dtype=dtype, shape=shape)
            inputs["latents"] = latents

        return inputs

    def get_fixed_latents(self, seed=0, device="cpu", dtype=torch.float16, shape=(1, 3, 64, 64)):
        if type(device) == str:
            device = torch.device(device)
        generator = torch.Generator(device=device).manual_seed(seed)
        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        return latents
    def test_consistency_model_cd_multistep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_consistency_model_cd_onestep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
    @require_torch_2
    def test_consistency_model_cd_multistep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)

        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    @require_torch_2
    def test_consistency_model_cd_onestep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None

        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def exchange_sort(numbers: list[int]) -> list[int]:
    """Sort a list in place by repeatedly exchanging out-of-order pairs."""
    numbers_length = len(numbers)
    for i in range(numbers_length):
        for j in range(i + 1, numbers_length):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(exchange_sort(unsorted))
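    # Illustrative checks (added): exchange sort performs O(n^2) comparisons, like
    # selection sort, and sorts the list in place.
    assert exchange_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert exchange_sort([]) == []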
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
FILES_TO_FIND = [
    "kernels/rwkv/wkv_cuda.cu",
    "kernels/rwkv/wkv_op.cpp",
    "kernels/deformable_detr/ms_deform_attn.h",
    "kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh",
    "models/graphormer/algos_graphormer.pyx",
]


def test_custom_files_are_present(transformers_path):
    # Check that all the custom files listed in FILES_TO_FIND exist under the given path.
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True
if __name__ == "__main__":
a_ :List[Any] = argparse.ArgumentParser()
parser.add_argument('--check_lib', action='store_true', help='Whether to check the build or the actual package.')
a_ :Dict = parser.parse_args()
if args.check_lib:
a_ :List[Any] = importlib.import_module('transformers')
a_ :List[str] = Path(transformers_module.__file__).parent
else:
a_ :Union[str, Any] = Path.cwd() / 'build/lib/transformers'
if not test_custom_files_are_present(transformers_path):
raise ValueError('The built release does not contain the custom files. Fix this before going further!')
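
# Hedged invocation example (added; the script filename is an assumption based on the
# usual transformers utils/ layout):
#   python utils/check_build.py              # check the build/lib copy of the package
#   python utils/check_build.py --check_lib  # check the installed package instead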
import random


def rabin_miller(num: int) -> bool:
    """Probabilistic Rabin-Miller primality test with 5 random witnesses."""
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True
def is_prime_low_num(num: int) -> bool:
    """Quick trial division against small primes, falling back to Rabin-Miller."""
    if num < 2:
        return False

    # All primes below 1000.
    low_primes = [
        2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67,
        71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139,
        149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223,
        227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293,
        307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379, 383,
        389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463,
        467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569,
        571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643, 647,
        653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, 743,
        751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839,
        853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941,
        947, 953, 967, 971, 977, 983, 991, 997,
    ]

    if num in low_primes:
        return True

    for prime in low_primes:
        if (num % prime) == 0:
            return False

    return rabin_miller(num)
def generate_large_prime(keysize: int = 1024) -> int:
    """Generate a random prime with roughly `keysize` bits."""
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num


if __name__ == "__main__":
    num = generate_large_prime()
    print(("Prime number:", num))
    print(("is_prime_low_num:", is_prime_low_num(num)))
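# A minimal usage sketch (hypothetical key size, not part of the original
# module): smaller key sizes generate much faster and are handy for testing.
# >>> p = generate_large_prime(keysize=128)
# >>> p.bit_length()
# 128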
| 35
| 1
|
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = "\n    Examples:\n    ```py\n    >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n    >>> from diffusers.utils import load_image\n    >>> import torch\n\n    >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n    ...     \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16\n    ... )\n    >>> pipe_prior.to(\"cuda\")\n\n    >>> prompt = \"A red cartoon frog, 4k\"\n    >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n    >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n    ...     \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16\n    ... )\n    >>> pipe.to(\"cuda\")\n\n    >>> init_image = load_image(\n    ...     \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"\n    ...     \"/kandinsky/frog.png\"\n    ... )\n\n    >>> image = pipe(\n    ...     image=init_image,\n    ...     image_embeds=image_emb,\n    ...     negative_image_embeds=zero_image_emb,\n    ...     height=768,\n    ...     width=768,\n    ...     num_inference_steps=100,\n    ...     strength=0.2,\n    ... ).images\n\n    >>> image[0].save(\"red_frog.png\")\n    ```\n"
def downscale_height_and_width(height, width, scale_factor=8):
    """Round a pixel size down to the latent grid implied by scale_factor**2."""
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor


def prepare_image(pil_image, w=512, h=512):
    """Resize a PIL image and convert it to a [-1, 1] float tensor of shape (1, 3, h, w)."""
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
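# Worked example for downscale_height_and_width (illustrative values): with the
# default scale_factor=8, a 512x512 request maps to a 64x64 latent grid, and a
# 511x511 request rounds up to the same grid.
# >>> downscale_height_and_width(512, 512)
# (64, 64)
# >>> downscale_height_and_width(511, 511)
# (64, 64)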
class KandinskyV22Img2ImgPipeline(DiffusionPipeline):
    """Image-to-image generation pipeline for Kandinsky 2.2."""

    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
    def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}")
        image = image.to(device=device, dtype=dtype)
        batch_size = batch_size * num_images_per_prompt
        if image.shape[1] == 4:
            init_latents = image
        else:
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                    f" size of {batch_size}. Make sure the batch size matches the length of the generators.")
            elif isinstance(generator, list):
                init_latents = [
                    self.movq.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
                ]
                init_latents = torch.cat(init_latents, dim=0)
            else:
                init_latents = self.movq.encode(image).latent_dist.sample(generator)
            init_latents = self.movq.config.scaling_factor * init_latents
        init_latents = torch.cat([init_latents], dim=0)
        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")
        device = torch.device(f"cuda:{gpu_id}")
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
        device = torch.device(f"cuda:{gpu_id}")
        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        strength: float = 0.3,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0]
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device)
        if not isinstance(image, list):
            image = [image]
        if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image):
            raise ValueError(
                f"Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor")
        image = torch.cat([prepare_image(i, width, height) for i in image], dim=0)
        image = image.to(dtype=image_embeds.dtype, device=device)
        latents = self.movq.encode(image)["latents"]
        latents = latents.repeat_interleave(num_images_per_prompt, dim=0)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        latents = self.prepare_latents(
            latents, latent_timestep, batch_size, num_images_per_prompt, image_embeds.dtype, device, generator)
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]
            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, generator=generator)[0]
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 719
|
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class ClapFeatureExtractor(SequenceFeatureExtractor):
    """Feature extractor that turns raw audio into (optionally fused) log-mel spectrograms."""

    model_input_names = ["input_features", "is_longer"]

    def __init__(self, feature_size=64, sampling_rate=48000, hop_length=480, max_length_s=10, fft_window_size=1024, padding_value=0.0, return_attention_mask=False, frequency_min: float = 0, frequency_max: float = 14000, top_db: int = None, truncation: str = "fusion", padding: str = "repeatpad", **kwargs):
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, return_attention_mask=return_attention_mask, **kwargs)
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size, min_frequency=frequency_min, max_frequency=frequency_max, sampling_rate=sampling_rate, norm=None, mel_scale="htk")
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size, min_frequency=frequency_min, max_frequency=frequency_max, sampling_rate=sampling_rate, norm="slaney", mel_scale="slaney")
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output

    def _np_extract_fbank_features(self, waveform: np.array, mel_filters: Optional[np.array] = None):
        log_mel_spectrogram = spectrogram(
            waveform, window_function(self.fft_window_size, "hann"), frame_length=self.fft_window_size, hop_length=self.hop_length, power=2.0, mel_filters=mel_filters, log_mel="dB")
        return log_mel_spectrogram.T
    def _random_mel_fusion(self, mel, total_frames, chunk_frames):
        ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
        if len(ranges[1]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0])
        idx_middle = np.random.choice(ranges[1])
        idx_back = np.random.choice(ranges[2])
        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]
        mel_shrink = torch.tensor(mel[None, None, :])
        mel_shrink = torch.nn.functional.interpolate(
            mel_shrink, size=[chunk_frames, 64], mode="bilinear", align_corners=False)
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
        return mel_fusion
    def _get_input_mel(self, waveform: np.array, max_length, truncation, padding):
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform) - max_length
                idx = np.random.randint(0, overflow + 1)
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel], axis=0)
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
                    longer = True
            else:
                raise NotImplementedError(f"data_truncating {truncation} not implemented")
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat + 1))[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat))
                waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode="constant", constant_values=0)
            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
            else:
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
        return input_mel, longer
    def __call__(self, raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], truncation: str = None, padding: Optional[str] = None, max_length: Optional[int] = None, sampling_rate: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs):
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}.")
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug.")
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float64)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float32):
            raw_speech = raw_speech.astype(np.float64)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]
        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]
        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)
        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True
        if isinstance(input_mel[0], List):
            input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel]
        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]
        input_features = {"input_features": input_mel, "is_longer": is_longer}
        input_features = BatchFeature(input_features)
        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)
        return input_features
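# A minimal usage sketch (hypothetical values, not part of the original module):
# feed a few seconds of 48 kHz mono audio and inspect the fused mel features.
# Under the defaults, "fusion" stacks 4 mel views per clip, so the feature
# tensor is expected to be roughly (batch, 4, num_frames, 64).
# >>> extractor = ClapFeatureExtractor()
# >>> waveform = np.zeros(3 * 48000)  # 3 seconds of silence
# >>> features = extractor(waveform, sampling_rate=48000, return_tensors="np")
# >>> features["input_features"].ndim
# 4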
| 375
| 0
|
"""simple docstring"""
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('''At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training''')
# TF training parameters
USE_XLA = False
USE_AMP = False


def train_command_factory(args: Namespace):
    """Factory function used to instantiate the training command from provided command line arguments."""
    return TrainCommand(args)
class TrainCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Register this command to argparse so it's available through the CLI."""
        train_parser = parser.add_parser("train", help="CLI tool to train a model on a task.")
        train_parser.add_argument(
            "--train_data", type=str, required=True, help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.")
        train_parser.add_argument(
            "--column_label", type=int, default=0, help="Column of the dataset csv file with example labels.")
        train_parser.add_argument(
            "--column_text", type=int, default=1, help="Column of the dataset csv file with example texts.")
        train_parser.add_argument(
            "--column_id", type=int, default=2, help="Column of the dataset csv file with example ids.")
        train_parser.add_argument(
            "--skip_first_row", action="store_true", help="Skip the first row of the csv file (headers).")
        train_parser.add_argument("--validation_data", type=str, default="", help="path to validation dataset.")
        train_parser.add_argument(
            "--validation_split", type=float, default=0.1, help="if validation dataset is not provided, fraction of train dataset to use as validation dataset.")
        train_parser.add_argument("--output", type=str, default="./", help="path to saved the trained model.")
        train_parser.add_argument(
            "--task", type=str, default="text_classification", help="Task to train the model on.")
        train_parser.add_argument(
            "--model", type=str, default="bert-base-uncased", help="Model's name or path to stored model.")
        train_parser.add_argument("--train_batch_size", type=int, default=32, help="Batch size for training.")
        train_parser.add_argument("--valid_batch_size", type=int, default=64, help="Batch size for validation.")
        train_parser.add_argument("--learning_rate", type=float, default=3e-5, help="Learning rate.")
        train_parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon for Adam optimizer.")
        train_parser.set_defaults(func=train_command_factory)
    def __init__(self, args: Namespace):
        self.logger = logging.get_logger("transformers-cli/training")
        self.framework = "tf" if is_tf_available() else "torch"
        os.makedirs(args.output, exist_ok=True)
        self.output = args.output
        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id
        self.logger.info(f"Loading {args.task} pipeline for {args.model}")
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError
        self.logger.info(f"Loading dataset from {args.train_data}")
        self.train_dataset = Processor.create_from_csv(
            args.train_data, column_label=args.column_label, column_text=args.column_text, column_id=args.column_id, skip_first_row=args.skip_first_row)
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(f"Loading validation dataset from {args.validation_data}")
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data, column_label=args.column_label, column_text=args.column_text, column_id=args.column_id, skip_first_row=args.skip_first_row)
        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon
    def run(self):
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()

    def run_torch(self):
        raise NotImplementedError

    def run_tf(self):
        self.pipeline.fit(
            self.train_dataset, validation_data=self.valid_dataset, validation_split=self.validation_split, learning_rate=self.learning_rate, adam_epsilon=self.adam_epsilon, train_batch_size=self.train_batch_size, valid_batch_size=self.valid_batch_size)
        # Save trained pipeline
        self.pipeline.save_pretrained(self.output)
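# A minimal usage sketch (hypothetical paths; the exact entry point depends on
# how the CLI is installed). Every flag used here is declared in
# register_subcommand above:
#
#   transformers-cli train --train_data ./train.csv --output ./my_model \
#       --task text_classification --model bert-base-uncased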
| 426
|
"""simple docstring"""
import os
# Precomputes a list of the 100 first triangular numbers
SCREAMING_SNAKE_CASE_ = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def A__ ( ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase = os.path.dirname(os.path.realpath(A__ ) )
_UpperCAmelCase = os.path.join(A__ , "words.txt" )
_UpperCAmelCase = ""
with open(A__ ) as f:
_UpperCAmelCase = f.readline()
_UpperCAmelCase = [word.strip("\"" ) for word in words.strip("\r\n" ).split("," )]
_UpperCAmelCase = [
word
for word in [sum(ord(A__ ) - 64 for x in word ) for word in words]
if word in TRIANGULAR_NUMBERS
]
return len(A__ )
if __name__ == "__main__":
print(solution())
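# Worked example of the letter-value rule above: "SKY" -> 19 + 11 + 25 = 55,
# and 55 = 10 * 11 / 2 is the 10th triangular number, so "SKY" is counted.
# >>> sum(ord(ch) - 64 for ch in "SKY")
# 55
# >>> 55 in TRIANGULAR_NUMBERS
# True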
| 426
| 1
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class TFMobileBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFMobileBertModel,
            TFMobileBertForMaskedLM,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFMobileBertModel,
            "fill-mask": TFMobileBertForMaskedLM,
            "question-answering": TFMobileBertForQuestionAnswering,
            "text-classification": TFMobileBertForSequenceClassification,
            "token-classification": TFMobileBertForTokenClassification,
            "zero-shot": TFMobileBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
        return inputs_dict
    class TFMobileBertModelTester(object):
        def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, embedding_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
            self.parent = parent
            self.batch_size = batch_size
            self.seq_length = seq_length
            self.is_training = is_training
            self.use_input_mask = use_input_mask
            self.use_token_type_ids = use_token_type_ids
            self.use_labels = use_labels
            self.vocab_size = vocab_size
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.intermediate_size = intermediate_size
            self.hidden_act = hidden_act
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.type_sequence_label_size = type_sequence_label_size
            self.initializer_range = initializer_range
            self.num_labels = num_labels
            self.num_choices = num_choices
            self.scope = scope
            self.embedding_size = embedding_size

        def prepare_config_and_inputs(self):
            input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
            input_mask = None
            if self.use_input_mask:
                input_mask = random_attention_mask([self.batch_size, self.seq_length])
            token_type_ids = None
            if self.use_token_type_ids:
                token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
            sequence_labels = None
            token_labels = None
            choice_labels = None
            if self.use_labels:
                sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
                token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
                choice_labels = ids_tensor([self.batch_size], self.num_choices)
            config = MobileBertConfig(
                vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, embedding_size=self.embedding_size)
            return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        def create_and_check_mobilebert_model(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertModel(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            inputs = [input_ids, input_mask]
            result = model(inputs)
            result = model(input_ids)
            self.parent.assertEqual(
                result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
            self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

        def create_and_check_mobilebert_for_masked_lm(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForMaskedLM(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

        def create_and_check_mobilebert_for_next_sequence_prediction(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForNextSentencePrediction(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_pretraining(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForPreTraining(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(
                result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
            self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_sequence_classification(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_labels = self.num_labels
            model = TFMobileBertForSequenceClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        def create_and_check_mobilebert_for_multiple_choice(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_choices = self.num_choices
            model = TFMobileBertForMultipleChoice(config=config)
            multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
            multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
            multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
            inputs = {
                "input_ids": multiple_choice_inputs_ids,
                "attention_mask": multiple_choice_input_mask,
                "token_type_ids": multiple_choice_token_type_ids,
            }
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

        def create_and_check_mobilebert_for_token_classification(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_labels = self.num_labels
            model = TFMobileBertForTokenClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

        def create_and_check_mobilebert_for_question_answering(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForQuestionAnswering(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
            self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        def prepare_config_and_inputs_for_common(self):
            config_and_inputs = self.prepare_config_and_inputs()
            (
                config,
                input_ids,
                token_type_ids,
                input_mask,
                sequence_labels,
                token_labels,
                choice_labels,
            ) = config_and_inputs
            inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
            return config, inputs_dict
    def setUp(self):
        self.model_tester = TFMobileBertModelTest.TFMobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["google/mobilebert-uncased"]:
            model = TFMobileBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFMobileBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ])
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
|
'''simple docstring'''
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score
import datasets
UpperCamelCase : int = """\
@inproceedings{kakwani2020indicnlpsuite,
title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},
author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},
year={2020},
booktitle={Findings of EMNLP},
}
"""
UpperCamelCase : List[str] = """\
IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide
variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.
"""
UpperCamelCase : Tuple = """
Compute IndicGLUE evaluation metric associated to each IndicGLUE dataset.
Args:
predictions: list of predictions to score (as int64),
except for 'cvit-mkb-clsr' where each prediction is a vector (of float32).
references: list of ground truth labels corresponding to the predictions (as int64),
except for 'cvit-mkb-clsr' where each reference is a vector (of float32).
Returns: depending on the IndicGLUE subset, one or several of:
\"accuracy\": Accuracy
\"f1\": F1 score
\"precision\": Precision@10
Examples:
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'wnli') # 'wnli' or any of [\"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'wiki-ner')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'cvit-mkb-clsr')
>>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'precision@10': 1.0}
"""
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def precision_at_10(en_sentvecs, in_sentvecs):
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]
    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)
    sim = cdist(en_sentvecs, in_sentvecs, "cosine")
    actual = np.array(range(n))
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())
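# A minimal sanity check of precision_at_10 (hypothetical data, matching the
# docstring example below): when the English and Indic sentence vectors are
# identical, every query retrieves itself, so the score is 1.0.
# >>> vecs = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
# >>> precision_at_10(vecs, vecs)
# 1.0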
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class IndicGlue(datasets.Metric):
    def _info(self):
        if self.config_name not in [
            "wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca",
            "cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", "wiki-ner",
        ]:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
                '"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
                '"wiki-ner"]')
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64")
                    if self.config_name != "cvit-mkb-clsr"
                    else datasets.Sequence(datasets.Value("float32")),
                    "references": datasets.Value("int64")
                    if self.config_name != "cvit-mkb-clsr"
                    else datasets.Sequence(datasets.Value("float32")),
                }),
            codebase_urls=[],
            reference_urls=[],
            format="numpy" if self.config_name != "cvit-mkb-clsr" else None,
        )

    def _compute(self, predictions, references):
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_10(predictions, references)}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in [
            "wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca",
            "iitp-mr", "iitp-pr", "actsa-sc", "md",
        ]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
                '"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
                '"wiki-ner"]')
| 610
| 1
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class GLPNImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size_divisor: int = 32, resample=PILImageResampling.BILINEAR, do_rescale: bool = True, **kwargs) -> None:
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(self, image: np.ndarray, size_divisor: int, resample, data_format: Optional[ChannelDimension] = None, **kwargs) -> np.ndarray:
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[ChannelDimension] = None, **kwargs) -> np.ndarray:
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(self, images: Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]], do_resize: Optional[bool] = None, size_divisor: Optional[int] = None, resample=None, do_rescale: Optional[bool] = None, return_tensors: Optional[Union[TensorType, str]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample
        if do_resize and size_divisor is None:
            raise ValueError("size_divisor is required for resizing")
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError("Invalid image(s)")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]
        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
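# A minimal usage sketch (hypothetical input, not part of the original module):
# heights and widths are rounded *down* to a multiple of size_divisor, so a
# 37x41 image becomes 32x32 before rescaling.
# >>> processor = GLPNImageProcessor(size_divisor=32)
# >>> image = np.zeros((37, 41, 3), dtype=np.uint8)
# >>> out = processor.preprocess(image, return_tensors="np")
# >>> out["pixel_values"].shape
# (1, 3, 32, 32)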
| 56
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/config.json''',
'''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/config.json''',
'''xlm-roberta-large-finetuned-conll02-dutch''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll02-spanish''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll03-english''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll03-german''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'''
),
}
class XLMRobertaConfig(PretrainedConfig):
    model_type = "xlm-roberta"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ])
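# A minimal usage sketch (assumes the transformers package is installed): build
# a randomly initialised model from a fresh configuration.
# >>> from transformers import XLMRobertaConfig, XLMRobertaModel
# >>> config = XLMRobertaConfig(vocab_size=30522, hidden_size=768)
# >>> model = XLMRobertaModel(config)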
| 682
| 0
|
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
    },
    "emoji_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "abeja/gpt-neox-japanese-2.7b": 2_048,
}
def load_vocab_and_emoji(vocab_file, emoji_file):
    """Loads a vocabulary file and an emoji file into dictionaries."""
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())
    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b)] = idx
        for wd in b:
            vocab[wd] = idx
    return vocab, raw_vocab, ids_to_tokens, emoji
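# Format note, inferred from the parsing above (stated as an assumption, not
# documented behaviour): each line of vocab.txt holds the comma-separated
# surface forms of a single token id, so a hypothetical line "あ,ア" maps both
# spellings to the same id.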
class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, emoji_file, unk_token="<|endoftext|>", pad_token="<|endoftext|>", bos_token="<|startoftext|>", eos_token="<|endoftext|>", do_clean_text=False, **kwargs):
        super().__init__(
            unk_token=unk_token, pad_token=pad_token, bos_token=bos_token, eos_token=eos_token, do_clean_text=do_clean_text, **kwargs)
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                " model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"
                " pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji)

    @property
    def vocab_size(self):
        # self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
        return len(self.raw_vocab)

    def get_vocab(self):
        return dict(self.raw_vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.subword_tokenizer.convert_id_to_token(index)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).strip()
        return out_string

    def _build_conversation_input_ids(self, conversation: "Conversation"):
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
            emoji_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"])
        else:
            vocab_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!")
                    index = token_index
                writer.write(",".join(token) + "\n")
                index += 1
        with open(emoji_file, "w", encoding="utf-8") as writer:
            json.dump(self.emoji, writer)
        return vocab_file, emoji_file
class SubWordJapaneseTokenizer(object):
    def __init__(self, vocab, ids_to_tokens, emoji):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        self.content_repatter1 = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
        self.content_repatter2 = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
        self.content_repatter3 = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
        self.content_repatter4 = re.compile(
            r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter5 = re.compile(
            r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter6 = re.compile(
            r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*"
        )
        keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
        blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
        self.content_trans1 = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})
    def __len__(self):
        return len(self.ids_to_tokens)

    def clean_text(self, content):
        content = self.content_repatter1.sub("<URL>", content)
        content = self.content_repatter2.sub("<EMAIL>", content)
        content = self.content_repatter3.sub("<TEL>", content)
        content = self.content_repatter4.sub("<DATE>", content)
        content = self.content_repatter5.sub("<DATE>", content)
        content = self.content_repatter6.sub("<PRICE>", content)
        content = content.translate(self.content_trans1)
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("<BLOCK><BLOCK>", "<BLOCK>")
        return content

    def tokenize(self, text, clean=False):
        text = text.replace(" ", "<SP>")
        text = text.replace("　", "<SP>")  # U+3000 ideographic space
        text = text.replace("\r\n", "<BR>")
        text = text.replace("\n", "<BR>")
        text = text.replace("\r", "<BR>")
        text = text.replace("\t", "<TAB>")
        text = text.replace("—", "ー")
        text = text.replace("−", "ー")
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k, v)
        if clean:
            text = self.clean_text(text)
        def check_simbol(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xC2A1 and c <= 0xC2BF)
                    or (c >= 0xC780 and c <= 0xC783)
                    or (c >= 0xCAB9 and c <= 0xCBBF)
                    or (c >= 0xCC80 and c <= 0xCDA2)
                ):
                    return True
            return False

        def checku2e(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xE28080 and c <= 0xE2B07F:
                    return True
            return False
        pos = 0
        result = []
        while pos < len(text):
            end = min(len(text), pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end, pos, -1):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
                result.append(wd)
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd):
                    result.append("<KIGOU>")
                elif checku2e(wd):
                    result.append("<U2000U2BFF>")
                else:
                    for i in wd.encode("utf-8"):
                        result.append("<|byte%d|>" % i)
                pos = end
        return result
    def convert_id_to_token(self, index, breakline="\n"):
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2]))
        else:
            if len(byte_tokens) > 0:
                words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji["emoji_inv"][word])
            elif word == "<SP>":
                words.append(" ")
            elif word == "<BR>":
                words.append(breakline)
            elif word == "<TAB>":
                words.append("\t")
            elif word == "<BLOCK>":
                words.append("▀")
            elif word == "<KIGOU>":
                words.append("ǀ")
            elif word == "<U2000U2BFF>":
                words.append("‖")
            else:
                words.append(word)
        if len(byte_tokens) > 0:
            words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
        text = "".join(words)
        return text
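# A minimal usage sketch of SubWordJapaneseTokenizer, with a hypothetical toy
# vocabulary (real checkpoints ship full vocab/emoji files). tokenize()
# collects every vocabulary match starting at the cursor and keeps the
# candidate with the smallest token id, falling back to byte tokens:
#
#     toy = SubWordJapaneseTokenizer(
#         vocab={"こん": 0, "にちは": 1},
#         ids_to_tokens={0: ["こん"], 1: ["にちは"]},
#         emoji={"emoji": {}},
#     )
#     toy.tokenize("こんにちは")  # -> ['こん', 'にちは']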
| 714
|
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
FAIRSEQ_MODELS = ["bart.large", "bart.large.mnli", "bart.large.cnn", "bart_xsum/model.pt"]
extra_arch = {"bart.large": BartModel, "bart.large.mnli": BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse("0.9.0"):
    raise Exception("requires fairseq >= 0.9.0")

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = " Hello world! cécé herlolip"

mnli_rename_keys = [
    ("model.classification_heads.mnli.dense.weight", "classification_head.dense.weight"),
    ("model.classification_heads.mnli.dense.bias", "classification_head.dense.bias"),
    ("model.classification_heads.mnli.out_proj.weight", "classification_head.out_proj.weight"),
    ("model.classification_heads.mnli.out_proj.bias", "classification_head.out_proj.bias"),
]
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def load_xsum_checkpoint(checkpoint_path):
    """Checkpoint path should end in model.pt"""
    sd = torch.load(checkpoint_path, map_location="cpu")
    hub_interface = torch.hub.load("pytorch/fairseq", "bart.large.cnn").eval()
    hub_interface.model.load_state_dict(sd["model"])
    return hub_interface
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
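# Weight-tying sanity sketch (toy sizes are hypothetical): the linear layer
# returned above shares its weight storage with the embedding, so no copy is
# made and the two stay in sync:
#
#     embedding = nn.Embedding(10, 4)
#     lm_head = make_linear_from_emb(embedding)
#     assert lm_head.weight.data_ptr() == embedding.weight.data_ptr()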
@torch.no_grad()
def convert_bart_checkpoint(checkpoint_path, pytorch_dump_folder_path, hf_checkpoint_name=None):
    """Copy/paste/tweak the fairseq model's weights into the Hugging Face BART structure."""
    if not os.path.exists(checkpoint_path):
        bart = torch.hub.load("pytorch/fairseq", checkpoint_path).eval()
    else:
        bart = load_xsum_checkpoint(checkpoint_path)

    bart.model.upgrade_state_dict(bart.model.state_dict())
    if hf_checkpoint_name is None:
        hf_checkpoint_name = checkpoint_path.replace(".", "-")
    config = BartConfig.from_pretrained(hf_checkpoint_name)
    tokens = bart.encode(SAMPLE_TEXT).unsqueeze(0)
    tokens2 = BartTokenizer.from_pretrained(hf_checkpoint_name).encode(SAMPLE_TEXT, return_tensors="pt").unsqueeze(0)
    if not torch.eq(tokens, tokens2).all():
        raise ValueError(
            f"converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokens2}"
        )

    if checkpoint_path == "bart.large.mnli":
        state_dict = bart.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["model.shared.weight"] = state_dict["model.decoder.embed_tokens.weight"]
        for src, dest in mnli_rename_keys:
            rename_key(state_dict, src, dest)
        model = BartForSequenceClassification(config).eval()
        model.load_state_dict(state_dict)
        fairseq_output = bart.predict("mnli", tokens, return_logits=True)
        new_model_outputs = model(tokens)[0]  # logits
    else:  # no classification heads to worry about
        state_dict = bart.model.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
        fairseq_output = bart.extract_features(tokens)
        if hf_checkpoint_name == "facebook/bart-large":
            model = BartModel(config).eval()
            model.load_state_dict(state_dict)
            new_model_outputs = model(tokens).model[0]
        else:
            model = BartForConditionalGeneration(config).eval()  # an existing summarization ckpt
            model.model.load_state_dict(state_dict)
            if hasattr(model, "lm_head"):
                model.lm_head = make_linear_from_emb(model.model.shared)
            new_model_outputs = model.model(tokens)[0]

    # Check results
    if fairseq_output.shape != new_model_outputs.shape:
        raise ValueError(
            f"`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}"
        )
    if (fairseq_output != new_model_outputs).any().item():
        raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
    )
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--hf_config", default=None, type=str, help="Which huggingface architecture to use: bart-large-xsum"
    )
    args = parser.parse_args()
    convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
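# Hypothetical invocation (script name and paths are illustrative only):
#     python convert_bart_original_pytorch_checkpoint_to_pytorch.py \
#         bart.large.cnn /tmp/bart-large-cnn --hf_config facebook/bart-large-cnn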
| 80
| 0
|
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
TEXT = '''
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.
In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]
On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
'''
class TextQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-question-answering")
        self.tool.setup()
        self.remote_tool = load_tool("text-question-answering", remote=True)

    def test_exact_match_arg(self):
        result = self.tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg(self):
        result = self.tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")
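# The tool can also be driven outside the test harness; a hedged sketch (the
# exact answer depends on the underlying QA model, so none is asserted here):
#
#     tool = load_tool("text-question-answering")
#     tool.setup()
#     answer = tool(TEXT, "Who founded Hugging Face?")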
| 186
|
def or_gate(input_1: int, input_2: int) -> int:
    """Calculate OR of the input values."""
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    """Exhaustively test or_gate over all 2-bit inputs."""
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1
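# Step-by-step for or_gate(0, 1): (0, 1).count(1) -> 1; 1 != 0 -> True;
# int(True) -> 1, i.e. the gate fires when at least one input is 1.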
if __name__ == "__main__":
    print(or_gate(0, 1))
    print(or_gate(1, 0))
    print(or_gate(0, 0))
    print(or_gate(1, 1))
| 186
| 1
|
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class ParquetConfig(datasets.BuilderConfig):
    """BuilderConfig for Parquet."""

    batch_size: int = 10_000
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None


class Parquet(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = ParquetConfig
    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in data_files."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files):
                    with open(file, "rb") as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
                    break
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.info.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema) != sorted(self.config.columns):
                raise ValueError(
                    f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'"
                )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                parquet_file = pq.ParquetFile(f)
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns)
                    ):
                        pa_table = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
                except ValueError as e:
                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                    raise
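# A standalone sketch of the same batched-read pattern (assumes a local
# "data.parquet"; the file name is illustrative). Reading via iter_batches
# keeps peak memory bounded by batch_size rows instead of the whole file:
#
#     parquet_file = pq.ParquetFile("data.parquet")
#     for record_batch in parquet_file.iter_batches(batch_size=10_000):
#         pa_table = pa.Table.from_batches([record_batch])
#         ...  # process one chunk at a time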
| 717
|
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
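# These dummy classes let the parent package import cleanly when the optional
# backends are missing; the failure is deferred to first use. Sketch (the
# exact error text comes from requires_backends and is paraphrased here):
#
#     MidiProcessor()  # ImportError: requires transformers, torch and note_seq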
| 44
| 0
|
import pprint
import requests
API_ENDPOINT_URL = "https://zenquotes.io/api"


def quote_of_the_day() -> list:
    """Fetch the quote of the day from the zenquotes API."""
    return requests.get(API_ENDPOINT_URL + "/today").json()


def random_quotes() -> list:
    """Fetch a random quote from the zenquotes API."""
    return requests.get(API_ENDPOINT_URL + "/random").json()


if __name__ == "__main__":
    response = random_quotes()
    pprint.pprint(response)
| 28
|
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    """Flip two qubits with X gates and measure them on the Aer simulator."""
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0)
    circuit.x(1)
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
    print(f"Total count for various states are: {counts}")
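# With X applied to both qubits and a noiseless simulator, every one of the
# 1000 shots collapses to the state '11', so the printed counts should be
# {'11': 1000}.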
| 631
| 0
|
def neville_interpolate(x_points: list, y_points: list, x0: int) -> list:
    """Neville's iterated interpolation: evaluate at x0 the polynomial through the given points."""
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]
    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])
    return [q[n - 1][n - 1], q]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
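# Worked example: the sample points below lie on the line y = x + 5, so
# Neville's scheme reproduces it exactly and interpolating at x0 = 5 gives:
#
#     >>> neville_interpolate([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 5)[0]
#     10.0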
| 701
|
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}


class EfficientNetConfig(PretrainedConfig):
    model_type = "efficientnet"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4
class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
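# A minimal usage sketch: the defaults reproduce the EfficientNet-B7 geometry
# (600 px inputs, width/depth coefficients 2.0/3.1), and num_hidden_layers is
# derived from the block repeats:
#
#     config = EfficientNetConfig()
#     assert config.image_size == 600
#     assert config.num_hidden_layers == sum([1, 2, 2, 3, 3, 4, 1]) * 4  # 64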
| 407
| 0
|