"""simple docstring"""
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCamelCase__ ( _lowerCamelCase : Dict , _lowerCamelCase : Tuple , _lowerCamelCase : List[str] ) -> Optional[Any]:
return params[F'''{prefix}/{prefix}/relpos_bias/rel_embedding'''][:, i, :]
def lowerCamelCase__ ( _lowerCamelCase : str , _lowerCamelCase : Tuple , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Optional[int]="attention" ) -> str:
lowerCamelCase_ = lowerCamelCase_ = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/key/kernel'''][:, i, :, :] )
lowerCamelCase_ = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] )
lowerCamelCase_ = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/out/kernel'''][:, i, :, :] )
lowerCamelCase_ = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] )
lowerCamelCase_ = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/query/kernel'''][:, i, :, :] )
lowerCamelCase_ = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] )
lowerCamelCase_ = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/value/kernel'''][:, i, :, :] )
lowerCamelCase_ = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] )
return k, o, q, v
def lowerCamelCase__ ( _lowerCamelCase : Optional[int] , _lowerCamelCase : Tuple , _lowerCamelCase : str , _lowerCamelCase : Tuple=False ) -> Optional[int]:
if split_mlp_wi:
lowerCamelCase_ = params[F'''{prefix}/{prefix}/mlp/wi_0/kernel'''][:, i, :]
lowerCamelCase_ = params[F'''{prefix}/{prefix}/mlp/wi_1/kernel'''][:, i, :]
lowerCamelCase_ = (wi_a, wi_a)
else:
lowerCamelCase_ = params[F'''{prefix}/{prefix}/mlp/wi/kernel'''][:, i, :]
lowerCamelCase_ = params[F'''{prefix}/{prefix}/mlp/wo/kernel'''][:, i, :]
return wi, wo
def lowerCamelCase__ ( _lowerCamelCase : List[Any] , _lowerCamelCase : int , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[int] ) -> int:
return params[F'''{prefix}/{prefix}/{layer_name}/scale'''][:, i]
def lowerCamelCase__ ( _lowerCamelCase : dict , *, _lowerCamelCase : int , _lowerCamelCase : bool , _lowerCamelCase : bool = False ) -> str:
lowerCamelCase_ = traverse_util.flatten_dict(variables['target'] )
lowerCamelCase_ = {'/'.join(_lowerCamelCase ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
lowerCamelCase_ = 'encoder/encoder/mlp/wi_0/kernel' in old
print('Split MLP:' , _lowerCamelCase )
lowerCamelCase_ = collections.OrderedDict()
# Shared embeddings.
lowerCamelCase_ = old['token_embedder/embedding']
# Encoder.
for i in range(_lowerCamelCase ):
# Block i, layer 0 (Self Attention).
lowerCamelCase_ = tax_layer_norm_lookup(_lowerCamelCase , _lowerCamelCase , 'encoder' , 'pre_attention_layer_norm' )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = tax_attention_lookup(_lowerCamelCase , _lowerCamelCase , 'encoder' , 'attention' )
lowerCamelCase_ = layer_norm
lowerCamelCase_ = k.T
lowerCamelCase_ = o.T
lowerCamelCase_ = q.T
lowerCamelCase_ = v.T
# Block i, layer 1 (MLP).
lowerCamelCase_ = tax_layer_norm_lookup(_lowerCamelCase , _lowerCamelCase , 'encoder' , 'pre_mlp_layer_norm' )
lowerCamelCase_ , lowerCamelCase_ = tax_mlp_lookup(_lowerCamelCase , _lowerCamelCase , 'encoder' , _lowerCamelCase )
lowerCamelCase_ = layer_norm
if split_mlp_wi:
lowerCamelCase_ = wi[0].T
lowerCamelCase_ = wi[1].T
else:
lowerCamelCase_ = wi.T
lowerCamelCase_ = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
lowerCamelCase_ = tax_relpos_bias_lookup(
_lowerCamelCase , _lowerCamelCase , 'encoder' ).T
lowerCamelCase_ = old['encoder/encoder_norm/scale']
if not scalable_attention:
lowerCamelCase_ = tax_relpos_bias_lookup(
_lowerCamelCase , 0 , 'encoder' ).T
lowerCamelCase_ = tax_relpos_bias_lookup(
_lowerCamelCase , 0 , 'decoder' ).T
if not is_encoder_only:
# Decoder.
for i in range(_lowerCamelCase ):
# Block i, layer 0 (Self Attention).
lowerCamelCase_ = tax_layer_norm_lookup(_lowerCamelCase , _lowerCamelCase , 'decoder' , 'pre_self_attention_layer_norm' )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = tax_attention_lookup(_lowerCamelCase , _lowerCamelCase , 'decoder' , 'self_attention' )
lowerCamelCase_ = layer_norm
lowerCamelCase_ = k.T
lowerCamelCase_ = o.T
lowerCamelCase_ = q.T
lowerCamelCase_ = v.T
# Block i, layer 1 (Cross Attention).
lowerCamelCase_ = tax_layer_norm_lookup(_lowerCamelCase , _lowerCamelCase , 'decoder' , 'pre_cross_attention_layer_norm' )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = tax_attention_lookup(_lowerCamelCase , _lowerCamelCase , 'decoder' , 'encoder_decoder_attention' )
lowerCamelCase_ = layer_norm
lowerCamelCase_ = k.T
lowerCamelCase_ = o.T
lowerCamelCase_ = q.T
lowerCamelCase_ = v.T
# Block i, layer 2 (MLP).
lowerCamelCase_ = tax_layer_norm_lookup(_lowerCamelCase , _lowerCamelCase , 'decoder' , 'pre_mlp_layer_norm' )
lowerCamelCase_ , lowerCamelCase_ = tax_mlp_lookup(_lowerCamelCase , _lowerCamelCase , 'decoder' , _lowerCamelCase )
lowerCamelCase_ = layer_norm
if split_mlp_wi:
lowerCamelCase_ = wi[0].T
lowerCamelCase_ = wi[1].T
else:
lowerCamelCase_ = wi.T
lowerCamelCase_ = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
lowerCamelCase_ = tax_relpos_bias_lookup(_lowerCamelCase , _lowerCamelCase , 'decoder' ).T
lowerCamelCase_ = old['decoder/decoder_norm/scale']
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
lowerCamelCase_ = old['decoder/logits_dense/kernel'].T
return new
def lowerCamelCase__ ( _lowerCamelCase : Tuple , _lowerCamelCase : bool ) -> Tuple:
lowerCamelCase_ = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
lowerCamelCase_ = state_dict['shared.weight']
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
lowerCamelCase_ = state_dict['shared.weight']
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print('Using shared word embeddings as lm_head.' )
lowerCamelCase_ = state_dict['shared.weight']
return state_dict
def lowerCamelCase__ ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : int , _lowerCamelCase : Any , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Optional[Any] ) -> Optional[Any]:
lowerCamelCase_ = checkpoints.load_tax_checkpoint(_lowerCamelCase )
lowerCamelCase_ = convert_tax_to_pytorch(
_lowerCamelCase , num_layers=config.num_layers , is_encoder_only=_lowerCamelCase , scalable_attention=_lowerCamelCase )
lowerCamelCase_ = make_state_dict(_lowerCamelCase , _lowerCamelCase )
model.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase )
def lowerCamelCase__ ( _lowerCamelCase : Tuple , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : bool = False , _lowerCamelCase : bool = False , ) -> Optional[Any]:
lowerCamelCase_ = MTaConfig.from_json_file(_lowerCamelCase )
print(F'''Building PyTorch model from configuration: {config}''' )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
lowerCamelCase_ = UMTaEncoderModel(_lowerCamelCase )
else:
lowerCamelCase_ = UMTaForConditionalGeneration(_lowerCamelCase )
# Load weights from tf checkpoint
load_tax_weights_in_ta(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(_lowerCamelCase )
# Verify that we can load the checkpoint.
model.from_pretrained(_lowerCamelCase )
print('Done' )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : str = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
parser.add_argument(
'''--scalable_attention''',
action='''store_true''',
help='''Whether the model uses scaled attention (umt5 model)''',
default=False,
)
_SCREAMING_SNAKE_CASE : Tuple = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
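# Usage sketch (not part of the original script; the paths below are hypothetical
# placeholders): the same conversion can be driven from Python instead of the CLI.
#
#     convert_t5x_checkpoint_to_pytorch(
#         t5x_checkpoint_path="/tmp/t5x_checkpoint",  # placeholder
#         config_file="/tmp/config.json",             # placeholder
#         pytorch_dump_path="/tmp/umt5_pytorch",      # placeholder
#         is_encoder_only=False,
#         scalable_attention=True,  # UMT5 checkpoints carry a relative attention bias per layer
#     )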
"""simple docstring"""
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class a ( unittest.TestCase ):
def UpperCamelCase ( self : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Dict ) -> str:
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , len(__SCREAMING_SNAKE_CASE ) )
for a, b in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
self.assertAlmostEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , delta=__SCREAMING_SNAKE_CASE )
def UpperCamelCase ( self : List[Any] ) -> int:
lowerCamelCase_ = GradientAccumulator()
accumulator([tf.constant([1.0, 2.0] )] )
accumulator([tf.constant([-2.0, 1.0] )] )
accumulator([tf.constant([-1.0, 2.0] )] )
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] )
self.assertEqual(accumulator.step , 3 )
self.assertEqual(len(accumulator.gradients ) , 1 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1e-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1e-2 )
def UpperCamelCase ( self : str ) -> Any:
lowerCamelCase_ = None
ops.enable_eager_execution_internal()
lowerCamelCase_ = tf.config.list_physical_devices('CPU' )
if len(__SCREAMING_SNAKE_CASE ) == 1:
tf.config.set_logical_device_configuration(
physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] )
lowerCamelCase_ = tf.config.list_logical_devices(device_type='CPU' )
lowerCamelCase_ = tf.distribute.MirroredStrategy(devices=devices[:2] )
with strategy.scope():
lowerCamelCase_ = GradientAccumulator()
lowerCamelCase_ = tf.Variable([4.0, 3.0] )
lowerCamelCase_ , lowerCamelCase_ = create_optimizer(5e-5 , 10 , 5 )
lowerCamelCase_ = tf.Variable([0.0, 0.0] , trainable=__SCREAMING_SNAKE_CASE )
def accumulate_on_replica(__SCREAMING_SNAKE_CASE : Union[str, Any] ):
accumulator([gradient] )
def apply_on_replica():
optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) )
@tf.function
def accumulate(__SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Any ):
with strategy.scope():
lowerCamelCase_ = strategy.experimental_local_results(__SCREAMING_SNAKE_CASE )
local_variables[0].assign(__SCREAMING_SNAKE_CASE )
local_variables[1].assign(__SCREAMING_SNAKE_CASE )
strategy.run(__SCREAMING_SNAKE_CASE , args=(gradient_placeholder,) )
@tf.function
def apply_grad():
with strategy.scope():
strategy.run(__SCREAMING_SNAKE_CASE )
def _check_local_values(__SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : List[str] ):
lowerCamelCase_ = strategy.experimental_local_results(accumulator._gradients[0] )
self.assertListAlmostEqual(values[0].value() , __SCREAMING_SNAKE_CASE , tol=1e-2 )
self.assertListAlmostEqual(values[1].value() , __SCREAMING_SNAKE_CASE , tol=1e-2 )
accumulate([1.0, 2.0] , [-1.0, 1.0] )
accumulate([3.0, -1.0] , [-1.0, -1.0] )
accumulate([-2.0, 2.0] , [3.0, -2.0] )
self.assertEqual(accumulator.step , 3 )
_check_local_values([2.0, 3.0] , [1.0, -2.0] )
apply_grad()
self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1e-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
_check_local_values([0.0, 0.0] , [0.0, 0.0] )
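# Illustrative sketch (not part of the test file): the accumulate/apply pattern these
# tests exercise is typically used like this in a custom TF training loop. `model`,
# `dataset`, `compute_loss` and `accumulation_steps` are hypothetical stand-ins.
#
#     accumulator = GradientAccumulator()
#     optimizer, _ = create_optimizer(5e-5, num_train_steps=1000, num_warmup_steps=100)
#     for step, batch in enumerate(dataset):
#         with tf.GradientTape() as tape:
#             loss = compute_loss(model, batch)  # hypothetical helper
#         accumulator(tape.gradient(loss, model.trainable_variables))
#         if (step + 1) % accumulation_steps == 0:
#             optimizer.apply_gradients(zip(accumulator.gradients, model.trainable_variables))
#             accumulator.reset()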
"""Convert a BioGPT fairseq checkpoint into a Transformers PyTorch checkpoint."""
import argparse
import json
import os
import re
import shutil

import torch

from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging


logging.set_verbosity_warning()

json_indent = 2


# modeled after the fairseq Dictionary class
class Dictionary:
    """A mapping from symbols to consecutive integers."""

    def __init__(
        self,
        *,  # begin keyword-only arguments
        bos="<s>",
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        extra_special_symbols=None,
    ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)

    def __eq__(self, other):
        return self.indices == other.indices

    def __getitem__(self, idx):
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word

    def __len__(self):
        """Returns the number of symbols in the dictionary."""
        return len(self.symbols)

    def __contains__(self, sym):
        return sym in self.indices

    @classmethod
    def load(cls, f):
        """Loads the dictionary from a text file with one '<symbol> <count>' pair per line."""
        d = cls()
        d.add_from_file(f)
        return d

    def add_symbol(self, word, n=1, overwrite=False):
        """Adds a word to the dictionary."""
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx

    def _load_meta(self, lines):
        return 0

    def add_from_file(self, f):
        """Loads a pre-existing dictionary from a text file and adds its symbols to this instance."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(f))
            return

        lines = f.readlines()
        indices_start_line = self._load_meta(lines)

        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(" ", 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(" ", 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        "Duplicate word found when loading Dictionary: '{}'. "
                        "Duplicate words can overwrite earlier ones by adding the "
                        "#fairseq:overwrite flag at the end of the corresponding row "
                        "in the dictionary file. If using the Camembert model, please "
                        "download an updated copy of the model file.".format(word)
                    )
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'")


def rewrite_dict_keys(d):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2


def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(f"path {biogpt_checkpoint_path} does not exist!")
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, "checkpoint.pt")
    if not os.path.isfile(checkpoint_file):
        raise ValueError(f"path to the file {checkpoint_file} does not exist!")
    chkpt = torch.load(checkpoint_file, map_location="cpu")

    args = chkpt["cfg"]["model"]

    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, "dict.txt")
    if not os.path.isfile(dict_file):
        raise ValueError(f"path to the file {dict_file} does not exist!")
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["vocab_file"])
    print(f"Generating {src_vocab_file} of {src_vocab_size} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, "bpecodes")
    if not os.path.isfile(bpecodes_file):
        raise ValueError(f"path to the file {bpecodes_file} does not exist!")

    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    shutil.copyfile(bpecodes_file, merges_file)

    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")

    model_conf = {
        "activation_dropout": args["activation_dropout"],
        "architectures": ["BioGptForCausalLM"],
        "attention_probs_dropout_prob": args["attention_dropout"],
        "bos_token_id": 0,
        "eos_token_id": 2,
        "hidden_act": args["activation_fn"],
        "hidden_dropout_prob": args["dropout"],
        "hidden_size": args["decoder_embed_dim"],
        "initializer_range": 0.02,
        "intermediate_size": args["decoder_ffn_embed_dim"],
        "layer_norm_eps": 1e-12,
        "layerdrop": args["decoder_layerdrop"],
        "max_position_embeddings": args["max_target_positions"],
        "model_type": "biogpt",
        "num_attention_heads": args["decoder_attention_heads"],
        "num_hidden_layers": args["decoder_layers"],
        "pad_token_id": 1,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_decoder_input_output_embed"],
        "vocab_size": src_vocab_size,
    }

    # good hparam defaults to start with
    print(f"Generating {biogpt_model_config_file}")
    with open(biogpt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)

    tokenizer_conf = {
        "bos_token": "<s>",
        "eos_token": "</s>",
        "model_max_length": 1024,
        "pad_token": "<pad>",
        "special_tokens_map_file": None,
        "tokenizer_class": "BioGptTokenizer",
        "unk_token": "<unk>",
    }

    print(f"Generating {biogpt_tokenizer_config_file}")
    with open(biogpt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model_state_dict = chkpt["model"]

    # remove unneeded keys
    ignore_keys = [
        "decoder.version",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    layer_names = list(model_state_dict.keys())
    for layer_name in layer_names:
        if layer_name.endswith("output_projection.weight"):
            model_state_dict["output_projection.weight"] = model_state_dict.pop(layer_name)
        else:
            model_state_dict[layer_name.replace("decoder", "biogpt")] = model_state_dict.pop(layer_name)

    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = BioGptForCausalLM(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--biogpt_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help=(
            "Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
            " bpecodes, etc."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
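# Usage sketch (paths are hypothetical placeholders, not from the original script): the
# checkpoint directory must contain checkpoint.pt, dict.txt and bpecodes, as checked above.
#
#     convert_biogpt_checkpoint_to_pytorch(
#         biogpt_checkpoint_path="/tmp/biogpt_fairseq",  # placeholder
#         pytorch_dump_folder_path="/tmp/biogpt_hf",     # placeholder
#     )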
lowerCamelCase ={"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
lowerCamelCase =["a", "b", "c", "d", "e"]
def SCREAMING_SNAKE_CASE_ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
UpperCamelCase__ : str = start
# add current to visited
visited.append(UpperCamelCase__ )
UpperCamelCase__ : int = edges[current]
for neighbor in neighbors:
# if neighbor not in visited, visit
if neighbor not in visited:
UpperCamelCase__ : int = topological_sort(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# if all neighbors visited add current to sort
sort.append(UpperCamelCase__ )
# if all vertices haven't been visited select a new one to visit
if len(UpperCamelCase__ ) != len(UpperCamelCase__ ):
for vertice in vertices:
if vertice not in visited:
UpperCamelCase__ : Optional[int] = topological_sort(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# return sort
return sort
if __name__ == "__main__":
lowerCamelCase =topological_sort("a", [], [])
print(sort)
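# Expected output for the module-level graph above (hand-traced, assuming neighbors are
# visited in list order): 'a' descends to the leaf 'c' first, then 'b' descends through
# 'd' and 'e', and each vertex is appended only after its reachable neighbors are done:
#
#     print(topological_sort("a", [], []))  # ['c', 'd', 'e', 'b', 'a']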
"""Lazy import structure for the Conditional DETR model."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_conditional_detr": [
        "CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ConditionalDetrConfig",
        "ConditionalDetrOnnxConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_conditional_detr"] = ["ConditionalDetrFeatureExtractor"]
    _import_structure["image_processing_conditional_detr"] = ["ConditionalDetrImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_conditional_detr"] = [
        "CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConditionalDetrForObjectDetection",
        "ConditionalDetrForSegmentation",
        "ConditionalDetrModel",
        "ConditionalDetrPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_conditional_detr import (
        CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ConditionalDetrConfig,
        ConditionalDetrOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
        from .image_processing_conditional_detr import ConditionalDetrImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_conditional_detr import (
            CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConditionalDetrForObjectDetection,
            ConditionalDetrForSegmentation,
            ConditionalDetrModel,
            ConditionalDetrPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""Sum of the numbers on the diagonals of an n x n number spiral (Project Euler style)."""
from math import ceil


def solution(n: int = 1001) -> int:
    """Returns the sum of the numbers on the diagonals in an n by n spiral."""
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution())
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number")
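# Sanity check (added): ring i of the spiral has side length 2i + 1 and its four corners
# are (2i+1)**2 - j*2i for j in 0..3, which sum to 4*(2i+1)**2 - 12i = 4*odd**2 - 6*even,
# matching the loop body above. For the 5x5 spiral the diagonal sum is 101:
#
#     assert solution(5) == 101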
"""Convert a TensorFlow 2 "token dropping" BERT checkpoint to a PyTorch checkpoint."""
import argparse

import tensorflow as tf
import torch

from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
    BertIntermediate,
    BertLayer,
    BertOutput,
    BertPooler,
    BertSelfAttention,
    BertSelfOutput,
)
from transformers.utils import logging


logging.set_verbosity_info()


def convert_checkpoint_to_pytorch(tf_checkpoint_path: str, config_path: str, pytorch_dump_path: str):
    def get_masked_lm_array(name: str):
        full_name = f"masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    def get_encoder_array(name: str):
        full_name = f"encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    def get_encoder_layer_array(layer_index: int, name: str):
        full_name = f"encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    def get_encoder_attention_layer_array(layer_index: int, name: str, orig_shape):
        full_name = f"encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        array = array.reshape(orig_shape)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    print(f"Loading model based on config from {config_path}...")
    config = BertConfig.from_json_file(config_path)
    model = BertForMaskedLM(config)

    # Layers
    for layer_index in range(0, config.num_hidden_layers):
        layer: BertLayer = model.bert.encoder.layer[layer_index]

        # Self-attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.query.weight.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/kernel", self_attn.query.weight.data.shape
        )
        self_attn.query.bias.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/bias", self_attn.query.bias.data.shape
        )
        self_attn.key.weight.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/kernel", self_attn.key.weight.data.shape
        )
        self_attn.key.bias.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/bias", self_attn.key.bias.data.shape
        )
        self_attn.value.weight.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/kernel", self_attn.value.weight.data.shape
        )
        self_attn.value.bias.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/bias", self_attn.value.bias.data.shape
        )

        # Self-attention Output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.weight.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/kernel", self_output.dense.weight.data.shape
        )
        self_output.dense.bias.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/bias", self_output.dense.bias.data.shape
        )

        self_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/gamma")
        self_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/beta")

        # Intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.weight.data = get_encoder_layer_array(layer_index, "_intermediate_dense/kernel")
        intermediate.dense.bias.data = get_encoder_layer_array(layer_index, "_intermediate_dense/bias")

        # Output
        bert_output: BertOutput = layer.output

        bert_output.dense.weight.data = get_encoder_layer_array(layer_index, "_output_dense/kernel")
        bert_output.dense.bias.data = get_encoder_layer_array(layer_index, "_output_dense/bias")

        bert_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_output_layer_norm/gamma")
        bert_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_output_layer_norm/beta")

    # Embeddings
    model.bert.embeddings.position_embeddings.weight.data = get_encoder_array("_position_embedding_layer/embeddings")
    model.bert.embeddings.token_type_embeddings.weight.data = get_encoder_array("_type_embedding_layer/embeddings")
    model.bert.embeddings.LayerNorm.weight.data = get_encoder_array("_embedding_norm_layer/gamma")
    model.bert.embeddings.LayerNorm.bias.data = get_encoder_array("_embedding_norm_layer/beta")

    # LM Head
    lm_head = model.cls.predictions.transform

    lm_head.dense.weight.data = get_masked_lm_array("dense/kernel")
    lm_head.dense.bias.data = get_masked_lm_array("dense/bias")

    lm_head.LayerNorm.weight.data = get_masked_lm_array("layer_norm/gamma")
    lm_head.LayerNorm.bias.data = get_masked_lm_array("layer_norm/beta")

    model.bert.embeddings.word_embeddings.weight.data = get_masked_lm_array("embedding_table")

    # Pooling
    model.bert.pooler = BertPooler(config=config)
    model.bert.pooler.dense.weight.data = get_encoder_array("_pooler_layer/kernel")
    model.bert.pooler.dense.bias.data = get_encoder_array("_pooler_layer/bias")

    # Export final model
    model.save_pretrained(pytorch_dump_path)

    # Integration test - should load without any errors ;)
    new_model = BertForMaskedLM.from_pretrained(pytorch_dump_path)
    print(new_model.eval())

    print("Model conversion was done successfully!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow Token Dropping checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        type=str,
        required=True,
        help="The config json file corresponding to the BERT model. This specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path",
        type=str,
        required=True,
        help="Path to the output PyTorch model.",
    )
    args = parser.parse_args()
    convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
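# Usage sketch (paths are hypothetical placeholders, not from the original script):
#
#     convert_checkpoint_to_pytorch(
#         tf_checkpoint_path="/tmp/token_dropping_bert/ckpt",  # placeholder
#         config_path="/tmp/token_dropping_bert/config.json",  # placeholder
#         pytorch_dump_path="/tmp/token_dropping_bert_pt",     # placeholder
#     )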
"""DPR model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DPR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/dpr-ctx_encoder-single-nq-base": (
        "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-question_encoder-single-nq-base": (
        "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-reader-single-nq-base": (
        "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-ctx_encoder-multiset-base": (
        "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"
    ),
    "facebook/dpr-question_encoder-multiset-base": (
        "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"
    ),
    "facebook/dpr-reader-multiset-base": (
        "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"
    ),
}


class DPRConfig(PretrainedConfig):
    model_type = "dpr"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        projection_dim: int = 0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
"""simple docstring"""
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class lowercase_ ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase : List[str] = IFPipeline
UpperCAmelCase : List[str] = TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''}
UpperCAmelCase : Optional[Any] = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCAmelCase : Any = PipelineTesterMixin.required_optional_params - {'''latents'''}
def lowerCAmelCase_ ( self : int ):
return self._get_dummy_components()
def lowerCAmelCase_ ( self : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : str=0 ):
if str(_UpperCAmelCase ).startswith('mps' ):
_A = torch.manual_seed(_UpperCAmelCase )
else:
_A = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
_A = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def lowerCAmelCase_ ( self : Union[str, Any] ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def lowerCAmelCase_ ( self : Optional[int] ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def lowerCAmelCase_ ( self : int ):
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def lowerCAmelCase_ ( self : int ):
self._test_save_load_local()
def lowerCAmelCase_ ( self : Any ):
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def lowerCAmelCase_ ( self : Union[str, Any] ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@slow
@require_torch_gpu
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self : List[Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase_ ( self : Optional[Any] ):
# if
_A = IFPipeline.from_pretrained('DeepFloyd/IF-I-XL-v1.0' , variant='fp16' , torch_dtype=torch.floataa )
_A = IFSuperResolutionPipeline.from_pretrained(
'DeepFloyd/IF-II-L-v1.0' , variant='fp16' , torch_dtype=torch.floataa , text_encoder=_UpperCAmelCase , tokenizer=_UpperCAmelCase )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to('cuda' )
_A , _A = pipe_a.encode_prompt('anime turtle' , device='cuda' )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
_A = None
_A = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
_A = IFImgaImgPipeline(**pipe_a.components )
_A = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
_A = IFInpaintingPipeline(**pipe_a.components )
_A = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def lowerCAmelCase_ ( self : List[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : int , _UpperCAmelCase : Optional[Any] ):
# pipeline 1
_start_torch_memory_measurement()
_A = torch.Generator(device='cpu' ).manual_seed(0 )
_A = pipe_a(
prompt_embeds=_UpperCAmelCase , negative_prompt_embeds=_UpperCAmelCase , num_inference_steps=2 , generator=_UpperCAmelCase , output_type='np' , )
_A = output.images[0]
assert image.shape == (64, 64, 3)
_A = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
_A = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy' )
assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase )
# pipeline 2
_start_torch_memory_measurement()
_A = torch.Generator(device='cpu' ).manual_seed(0 )
_A = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_UpperCAmelCase )
_A = pipe_a(
prompt_embeds=_UpperCAmelCase , negative_prompt_embeds=_UpperCAmelCase , image=_UpperCAmelCase , generator=_UpperCAmelCase , num_inference_steps=2 , output_type='np' , )
_A = output.images[0]
assert image.shape == (256, 256, 3)
_A = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_A = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy' )
assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase )
def lowerCAmelCase_ ( self : Tuple , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int , _UpperCAmelCase : str , _UpperCAmelCase : Optional[Any] ):
# pipeline 1
_start_torch_memory_measurement()
_A = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_UpperCAmelCase )
_A = torch.Generator(device='cpu' ).manual_seed(0 )
_A = pipe_a(
prompt_embeds=_UpperCAmelCase , negative_prompt_embeds=_UpperCAmelCase , image=_UpperCAmelCase , num_inference_steps=2 , generator=_UpperCAmelCase , output_type='np' , )
_A = output.images[0]
assert image.shape == (64, 64, 3)
_A = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
_A = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy' )
assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase )
# pipeline 2
_start_torch_memory_measurement()
_A = torch.Generator(device='cpu' ).manual_seed(0 )
_A = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(_UpperCAmelCase )
_A = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_UpperCAmelCase )
_A = pipe_a(
prompt_embeds=_UpperCAmelCase , negative_prompt_embeds=_UpperCAmelCase , image=_UpperCAmelCase , original_image=_UpperCAmelCase , generator=_UpperCAmelCase , num_inference_steps=2 , output_type='np' , )
_A = output.images[0]
assert image.shape == (256, 256, 3)
_A = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_A = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy' )
assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase )
def lowerCAmelCase_ ( self : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[Any] ):
# pipeline 1
_start_torch_memory_measurement()
_A = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_UpperCAmelCase )
_A = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(_UpperCAmelCase )
_A = torch.Generator(device='cpu' ).manual_seed(0 )
_A = pipe_a(
prompt_embeds=_UpperCAmelCase , negative_prompt_embeds=_UpperCAmelCase , image=_UpperCAmelCase , mask_image=_UpperCAmelCase , num_inference_steps=2 , generator=_UpperCAmelCase , output_type='np' , )
_A = output.images[0]
assert image.shape == (64, 64, 3)
_A = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
_A = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy' )
assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase )
# pipeline 2
_start_torch_memory_measurement()
_A = torch.Generator(device='cpu' ).manual_seed(0 )
_A = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_UpperCAmelCase )
_A = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(_UpperCAmelCase )
_A = floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(_UpperCAmelCase )
_A = pipe_a(
prompt_embeds=_UpperCAmelCase , negative_prompt_embeds=_UpperCAmelCase , image=_UpperCAmelCase , mask_image=_UpperCAmelCase , original_image=_UpperCAmelCase , generator=_UpperCAmelCase , num_inference_steps=2 , output_type='np' , )
_A = output.images[0]
assert image.shape == (256, 256, 3)
_A = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_A = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy' )
assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase )
def _snake_case ( ) -> Optional[int]:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
"""simple docstring"""
def _snake_case ( _snake_case : int , _snake_case : int ) -> int:
'''simple docstring'''
return int((input_a, input_a).count(1 ) != 0 )
def _snake_case ( ) -> None:
'''simple docstring'''
assert or_gate(0 , 0 ) == 0
assert or_gate(0 , 1 ) == 1
assert or_gate(1 , 0 ) == 1
assert or_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
"""simple docstring"""
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
__A = ''''''
if version.parse(importlib_metadata.version('''jiwer''')) < version.parse('''2.3.0'''):
class _snake_case ( tr.AbstractTransform ):
def __init__( self : Optional[Any] , UpperCAmelCase : str = " " ):
__lowerCamelCase : List[str] = sentence_delimiter
def lowerCamelCase__ ( self : Tuple , UpperCAmelCase : str ):
return list(UpperCAmelCase )
def lowerCamelCase__ ( self : Dict , UpperCAmelCase : List[str] ):
__lowerCamelCase : Any = []
for sent_idx, sentence in enumerate(UpperCAmelCase ):
chars.extend(self.process_string(UpperCAmelCase ) )
if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(UpperCAmelCase ) - 1:
chars.append(self.sentence_delimiter )
return chars
__A = tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
__A = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
__A = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
__A = '''\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.
CER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.
Character error rate can be computed as:
CER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).
CER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system with a CER of 0 being a perfect score.
'''
__A = '''
Computes CER score of transcribed segments against references.
Args:
references: list of references for each speech input.
predictions: list of transcribtions to score.
concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.
Returns:
(float): the character error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> cer = datasets.load_metric("cer")
>>> cer_score = cer.compute(predictions=predictions, references=references)
>>> print(cer_score)
0.34146341463414637
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _snake_case ( datasets.Metric ):
def lowerCamelCase__ ( self : Union[str, Any] ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/jitsi/jiwer/"] , reference_urls=[
"https://en.wikipedia.org/wiki/Word_error_rate",
"https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates",
] , )
def lowerCamelCase__ ( self : Optional[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str]=False ):
if concatenate_texts:
return jiwer.compute_measures(
UpperCAmelCase , UpperCAmelCase , truth_transform=UpperCAmelCase , hypothesis_transform=UpperCAmelCase , )["wer"]
__lowerCamelCase : Optional[int] = 0
__lowerCamelCase : Optional[int] = 0
for prediction, reference in zip(UpperCAmelCase , UpperCAmelCase ):
__lowerCamelCase : Dict = jiwer.compute_measures(
UpperCAmelCase , UpperCAmelCase , truth_transform=UpperCAmelCase , hypothesis_transform=UpperCAmelCase , )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
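# Usage sketch (mirrors the Examples section above; `concatenate_texts=True` joins all
# sentences before scoring, which the docstring notes can give a more accurate result):
#
#     cer = datasets.load_metric("cer")
#     score = cer.compute(
#         predictions=["this is the prediction"],
#         references=["this is the reference"],
#         concatenate_texts=True,
#     )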
"""simple docstring"""
import gc
import threading
import time
import psutil
import torch
class _snake_case :
def __init__( self : str ):
__lowerCamelCase : Optional[Any] = psutil.Process()
__lowerCamelCase : List[Any] = False
def lowerCamelCase__ ( self : str ):
__lowerCamelCase : List[Any] = -1
while True:
__lowerCamelCase : Union[str, Any] = max(self.process.memory_info().rss , self.cpu_memory_peak )
# can't sleep or will not catch the peak right (this comment is here on purpose)
if not self.peak_monitoring:
break
def lowerCamelCase__ ( self : Any ):
__lowerCamelCase : Optional[Any] = True
__lowerCamelCase : Union[str, Any] = threading.Thread(target=self.peak_monitor )
__lowerCamelCase : Optional[Any] = True
self.thread.start()
def lowerCamelCase__ ( self : Dict ):
__lowerCamelCase : str = False
self.thread.join()
return self.cpu_memory_peak
__A = PeakCPUMemory()
def lowercase_ ( ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase : Any = {"time": time.time()}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
__lowerCamelCase : Optional[int] = psutil.Process().memory_info().rss
cpu_peak_tracker.start()
# GPU mem
for i in range(torch.cuda.device_count() ):
__lowerCamelCase : int = torch.cuda.memory_allocated(_lowerCamelCase )
torch.cuda.reset_peak_memory_stats()
return measures
def lowercase_ ( _lowerCamelCase: Optional[Any] ) -> Tuple:
'''simple docstring'''
__lowerCamelCase : Tuple = {"time": time.time() - start_measures["time"]}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
__lowerCamelCase : Dict = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
__lowerCamelCase : Tuple = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20
# GPU mem
for i in range(torch.cuda.device_count() ):
__lowerCamelCase : Any = (torch.cuda.memory_allocated(_lowerCamelCase ) - start_measures[str(_lowerCamelCase )]) / 2**20
__lowerCamelCase : Optional[int] = (torch.cuda.max_memory_allocated(_lowerCamelCase ) - start_measures[str(_lowerCamelCase )]) / 2**20
return measures
def lowercase_ ( _lowerCamelCase: int , _lowerCamelCase: List[Any] ) -> Optional[int]:
'''simple docstring'''
print(F"""{description}:""" )
print(F"""- Time: {measures["time"]:.2f}s""" )
for i in range(torch.cuda.device_count() ):
print(F"""- GPU {i} allocated: {measures[str(_lowerCamelCase )]:.2f}MiB""" )
__lowerCamelCase : List[Any] = measures[F"""{i}-peak"""]
print(F"""- GPU {i} peak: {peak:.2f}MiB""" )
print(F"""- CPU RAM allocated: {measures["cpu"]:.2f}MiB""" )
print(F"""- CPU RAM peak: {measures["cpu-peak"]:.2f}MiB""" )
"""Find the median of two arrays."""
from __future__ import annotations


def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    """Returns the median of the merged, sorted arrays."""
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
from itertools import count


def solution(min_block_length: int = 50) -> int:
    """Returns the least n for which the fill-count function first exceeds one million."""
    fill_count_functions = [1] * min_block_length

    for n in count(min_block_length):
        fill_count_functions.append(1)

        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[n - block_start - block_length - 1]

            fill_count_functions[n] += 1

        if fill_count_functions[n] > 1_000_000:
            break

    return n


if __name__ == "__main__":
    print(f"{solution() = }")
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
_snake_case = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = 16_000 ):
'''simple docstring'''
_lowerCAmelCase : str = int(round(sample_rate * max_length ) )
if len(_lowerCamelCase ) <= sample_length:
return wav
_lowerCAmelCase : str = randint(0 , len(_lowerCamelCase ) - sample_length - 1 )
return wav[random_offset : random_offset + sample_length]
@dataclass
class UpperCAmelCase_ :
lowerCamelCase__ = field(default=a , metadata={'help': 'Name of a dataset from the datasets package'})
lowerCamelCase__ = field(
default=a , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
lowerCamelCase__ = field(
default=a , metadata={'help': 'A file containing the training audio paths and labels.'})
lowerCamelCase__ = field(
default=a , metadata={'help': 'A file containing the validation audio paths and labels.'})
lowerCamelCase__ = field(
default='train' , metadata={
'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
} , )
lowerCamelCase__ = field(
default='validation' , metadata={
'help': (
'The name of the training data set split to use (via the datasets library). Defaults to \'validation\''
)
} , )
lowerCamelCase__ = field(
default='audio' , metadata={'help': 'The name of the dataset column containing the audio data. Defaults to \'audio\''} , )
lowerCamelCase__ = field(
default='label' , metadata={'help': 'The name of the dataset column containing the labels. Defaults to \'label\''})
lowerCamelCase__ = field(
default=a , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
lowerCamelCase__ = field(
default=a , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
lowerCamelCase__ = field(
default=20 , metadata={'help': 'Audio clips will be randomly cut to this length during training if the value is set.'} , )
@dataclass
class UpperCAmelCase_ :
lowerCamelCase__ = field(
default='facebook/wav2vec2-base' , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} , )
lowerCamelCase__ = field(
default=a , metadata={'help': 'Pretrained config name or path if not the same as model_name'})
lowerCamelCase__ = field(
default=a , metadata={'help': 'Where do you want to store the pretrained models downloaded from the Hub'})
lowerCamelCase__ = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
lowerCamelCase__ = field(
default=a , metadata={'help': 'Name or path of preprocessor config.'})
lowerCamelCase__ = field(
default=a , metadata={'help': 'Whether to freeze the feature encoder layers of the model.'})
lowerCamelCase__ = field(
default=a , metadata={'help': 'Whether to generate an attention mask in the feature extractor.'})
lowerCamelCase__ = field(
default=a , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
lowerCamelCase__ = field(
default=a , metadata={'help': 'Whether to freeze the feature extractor layers of the model.'})
lowerCamelCase__ = field(
default=a , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , )
    def __post_init__(self):
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "will be removed in a future version. Use `--freeze_feature_encoder` "
                "instead. Setting `freeze_feature_encoder==True`.",
                FutureWarning,
            )
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "should not be used in combination with `--freeze_feature_encoder`. "
                "Only make use of `--freeze_feature_encoder`.")
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_audio_classification", model_args, data_args)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}")
logger.info(F"Training/evaluation parameters {training_args}" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to train from scratch." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Initialize our dataset and prepare it for the audio classification task.
    raw_datasets = DatasetDict()
    raw_datasets["train"] = load_dataset(
        data_args.dataset_name, data_args.dataset_config_name, split=data_args.train_split_name, use_auth_token=True if model_args.use_auth_token else None, )
    raw_datasets["eval"] = load_dataset(
        data_args.dataset_name, data_args.dataset_config_name, split=data_args.eval_split_name, use_auth_token=True if model_args.use_auth_token else None, )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F"--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. "
"Make sure to set `--audio_column_name` to the correct audio column - one of "
F"{', '.join(raw_datasets['train'].column_names )}." )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F"--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. "
"Make sure to set `--label_column_name` to the correct text column - one of "
F"{', '.join(raw_datasets['train'].column_names )}." )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path, return_attention_mask=model_args.attention_mask, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate))
    model_input_name = feature_extractor.model_input_names[0]

    def train_transforms(batch):
        """Apply train_transforms across a batch."""
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio["array"], max_length=data_args.max_length_seconds, sample_rate=feature_extractor.sampling_rate)
            subsampled_wavs.append(wav)
        inputs = feature_extractor(subsampled_wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch

    def val_transforms(batch):
        """Apply val_transforms across a batch."""
        wavs = [audio["array"] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
    labels = raw_datasets["train"].features[data_args.label_column_name].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label
# Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred):
        predictions = np.argmax(eval_pred.predictions, axis=1)
        return metric.compute(predictions=predictions, references=eval_pred.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path, num_labels=len(labels), label2id=label2id, id2label=id2label, finetuning_task="audio-classification", cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes, )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
    if training_args.do_train:
        if data_args.max_train_samples is not None:
            raw_datasets["train"] = (
                raw_datasets["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(train_transforms, output_all_columns=False)
    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            raw_datasets["eval"] = (
                raw_datasets["eval"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(val_transforms, output_all_columns=False)
# Initialize our trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=raw_datasets["train"] if training_args.do_train else None, eval_dataset=raw_datasets["eval"] if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=feature_extractor, )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()
# Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
# Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "audio-classification",
        "dataset": data_args.dataset_name,
        "tags": ["audio-classification"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
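# Example invocation (a sketch; the dataset below is an assumption, any Hub
# dataset exposing an audio column and a label column should work):
#
#   python run_audio_classification.py \
#       --model_name_or_path facebook/wav2vec2-base \
#       --dataset_name superb --dataset_config_name ks \
#       --output_dir ./wav2vec2-base-ft-keyword-spotting \
#       --do_train --do_eval \
#       --max_length_seconds 1 \
#       --learning_rate 3e-5 --num_train_epochs 5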
| 658
|
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
_snake_case = "\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",\n author = \"Lin, Chin-Yew and\n Och, Franz Josef\",\n booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",\n month = \"aug 23{--}aug 27\",\n year = \"2004\",\n address = \"Geneva, Switzerland\",\n publisher = \"COLING\",\n url = \"https://www.aclweb.org/anthology/C04-1072\",\n pages = \"501--507\",\n}\n"
_snake_case = "\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,\nthe better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n"
_snake_case = "\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n 'bleu': bleu score,\n 'precisions': geometric mean of n-gram precisions,\n 'brevity_penalty': brevity penalty,\n 'length_ratio': ratio of lengths,\n 'translation_length': translation_length,\n 'reference_length': reference_length\nExamples:\n\n >>> predictions = [\n ... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample\n ... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)\n ... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric(\"bleu\")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results[\"bleu\"])\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class Bleu(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"),
}), codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"], reference_urls=[
"https://en.wikipedia.org/wiki/BLEU",
"https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
], )
    def _compute(self, predictions, references, max_order=4, smooth=False):
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth)
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
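# Minimal usage sketch of the underlying helper (mirrors the metric docstring
# above; `compute_bleu` returns the tuple unpacked in `_compute`):
#
#   predictions = [["hello", "there", "general", "kenobi"]]
#   references = [[["hello", "there", "general", "kenobi"]]]
#   bleu, precisions, bp, ratio, trans_len, ref_len = compute_bleu(
#       reference_corpus=references, translation_corpus=predictions)
#   print(bleu)  # 1.0 for an exact match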
| 658
| 1
|
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_VERSION_REG = re.compile(R'''^(?P<major>\d+)''' R'''\.(?P<minor>\d+)''' R'''\.(?P<patch>\d+)$''')
@total_ordering
@dataclass
class Version:
    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None

    def __post_init__(self):
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)
def __repr__(self ):
return f'{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}'
@property
    def tuple(self):
        return self.major, self.minor, self.patch
    def _validate_operand(self, other):
        if isinstance(other, str):
            return Version(other)
        elif isinstance(other, Version):
            return other
        raise TypeError(f'{other} (type {type(other)}) cannot be compared to version.')
    def __eq__(self, other):
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple

    def __lt__(self, other):
        other = self._validate_operand(other)
        return self.tuple < other.tuple
def __hash__(self ):
return hash(_version_tuple_to_str(self.tuple ) )
@classmethod
    def from_dict(cls, dic):
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})

    def _to_yaml_string(self) -> str:
        return self.version_str
def _str_to_version_tuple(version_str) -> tuple:
    """Return the (major, minor, patch) tuple extracted from the version string."""
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(F'Invalid version \'{version_str}\'. Format should be x.y.z with {{x,y,z}} being digits.')
    return tuple(int(v) for v in [res.group('major'), res.group('minor'), res.group('patch')])

def _version_tuple_to_str(version_tuple) -> str:
    """Return the version string built from a (major, minor, patch) tuple."""
    return ".".join(str(v) for v in version_tuple)
| 230
|
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def remove_ignore_keys_(state_dict):
    """Drop fairseq bookkeeping keys that have no HF equivalent."""
    ignore_keys = [
        '''encoder.version''',
        '''decoder.version''',
        '''model.encoder.version''',
        '''model.decoder.version''',
        '''decoder.output_projection.weight''',
        '''_float_tensor''',
        '''encoder.embed_positions._float_tensor''',
        '''decoder.embed_positions._float_tensor''',
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    """Build a bias-free linear layer tied to the embedding weights."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_mamaaa_checkpoint_from_disk(checkpoint_path):
    """Load a fairseq checkpoint and convert it to a HF conditional-generation model."""
    mam_aaa = torch.load(checkpoint_path, map_location='''cpu''')
    args = mam_aaa['''args'''] or mam_aaa['''cfg''']['''model''']
    state_dict = mam_aaa['''model''']
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict['''encoder.embed_tokens.weight'''].shape[0]
    config = MaMaaaConfig(
        vocab_size=vocab_size, max_position_embeddings=1_0_2_4, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, encoder_attention_heads=args.encoder_attention_heads, decoder_attention_heads=args.decoder_attention_heads, encoder_ffn_dim=args.encoder_ffn_embed_dim, decoder_ffn_dim=args.decoder_ffn_embed_dim, d_model=args.encoder_embed_dim, encoder_layerdrop=args.encoder_layerdrop, decoder_layerdrop=args.decoder_layerdrop, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function='''relu''', )
    state_dict['''shared.weight'''] = state_dict['''decoder.embed_tokens.weight''']
    model = MaMaaaForConditionalGeneration(config)
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)
    return model
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""fairseq_path""", type=str, help="""path to a model.pt on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    args = parser.parse_args()
    model = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
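# Example invocation (a sketch; the script file name and both paths are
# placeholders):
#
#   python convert_fairseq_checkpoint.py /path/to/fairseq/model.pt ./converted-model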
| 0
| 0
|
'''simple docstring'''
import numpy as np
def exponential_linear_unit(vector: np.ndarray, alpha: float) -> np.ndarray:
    """ELU activation: x for x > 0, alpha * (exp(x) - 1) otherwise."""
    return np.where(vector > 0, vector, alpha * (np.exp(vector) - 1))
if __name__ == "__main__":
import doctest
doctest.testmod()
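# Minimal usage sketch (values rounded; exp(-1) - 1 ≈ -0.6321):
#
#   out = exponential_linear_unit(np.array([-1.0, 0.0, 2.0]), alpha=1.0)
#   # out ≈ [-0.6321, 0.0, 2.0]: negative inputs saturate toward -alpha,
#   # non-negative inputs pass through unchanged.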
| 711
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_xmod""": [
"""XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XmodConfig""",
"""XmodOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xmod"] = [
"""XMOD_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XmodForCausalLM""",
"""XmodForMaskedLM""",
"""XmodForMultipleChoice""",
"""XmodForQuestionAnswering""",
"""XmodForSequenceClassification""",
"""XmodForTokenClassification""",
"""XmodModel""",
"""XmodPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
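# Note: replacing sys.modules[__name__] with a _LazyModule means that, e.g.,
# `from transformers import XmodModel` only imports modeling_xmod (and hence
# torch) at first attribute access rather than at package import time.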
| 50
| 0
|
import enum
import os
from hashlib import sha256
from typing import Optional
from .. import config
from .logging import get_logger
logger = get_logger(__name__)

class VerificationMode(enum.Enum):
    ALL_CHECKS = '''all_checks'''
    BASIC_CHECKS = '''basic_checks'''
    NO_CHECKS = '''no_checks'''

class ChecksumVerificationException(Exception):
    """Exceptions during checksum verification of downloaded files."""

class UnexpectedDownloadedFile(ChecksumVerificationException):
    """Some downloaded files were not expected."""

class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    """Some files were supposed to be downloaded but were not."""

class NonMatchingChecksumError(ChecksumVerificationException):
    """The downloaded file checksum doesn't match the expected checksum."""
def verify_checksums(expected_checksums, recorded_checksums, verification_name=None):
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = ''' for ''' + verification_name if verification_name is not None else ''''''
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"""Checksums didn't match{for_verification_name}:\n"""
            f"""{bad_urls}\n"""
            "Set `verification_mode=\'no_checks\'` to skip checksums verification and ignore this error")
    logger.info("All the checksums matched successfully" + for_verification_name)
class SplitsVerificationException(Exception):
    """Exceptions during splits verification."""

class UnexpectedSplits(SplitsVerificationException):
    """The expected splits of the downloaded file are missing."""

class ExpectedMoreSplits(SplitsVerificationException):
    """Some recorded splits are missing."""

class NonMatchingSplitsSizesError(SplitsVerificationException):
    """The split sizes don't match the expected split sizes."""
def verify_splits(expected_splits, recorded_splits):
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {'''expected''': expected_splits[name], '''recorded''': recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")
def get_size_checksum_dict(path, record_checksum = True) -> dict:
    """Compute the file size and, optionally, the sha256 checksum of a file."""
    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}
def is_small_dataset(dataset_size) -> bool:
    """Check whether `dataset_size` fits under `config.IN_MEMORY_MAX_SIZE`."""
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
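# Minimal usage sketch of the verification helpers above (values illustrative):
#
#   expected = {"http://host/a.zip": {"checksum": "abc", "num_bytes": 10}}
#   recorded = {"http://host/a.zip": {"checksum": "abc", "num_bytes": 10}}
#   verify_checksums(expected, recorded)   # passes silently
#   verify_checksums(expected, {})         # raises ExpectedMoreDownloadedFiles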
| 524
|
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()
        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''])
        with open(self.vocab_file, '''w''', encoding='''utf-8''') as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = '''UNwant\u00E9d,running'''
        output_text = '''unwanted, running'''
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize('''UNwant\u00E9d,running''')
        self.assertListEqual(tokens, ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])
def lowercase__ ( self : str ):
pass
| 35
| 0
|
def equation(x: float) -> float:
    return 10 - x * x

def bisection(a: float, b: float) -> float:
    # Bolzano's theorem (intermediate value theorem): a continuous function with
    # opposite signs at a and b has at least one root between them.
    if equation(a) * equation(b) >= 0:
        raise ValueError('Wrong space!')
    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
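# Each loop iteration halves the bracketing interval, so reaching the 0.01
# tolerance from an initial width w takes about ceil(log2(w / 0.01)) steps;
# for bisection(-2, 5) that is ceil(log2(700)) = 10 iterations.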
| 144
|
import warnings
from .generation import TFGenerationMixin
class TFGenerationMixin(TFGenerationMixin):
    # warning at import time
    warnings.warn(
        'Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will '
        'be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.',
        FutureWarning,
    )
| 144
| 1
|
'''simple docstring'''
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def _preprocess_image(image):
    warnings.warn(
        """The preprocess method is deprecated and will be removed in a future version. Please"""
        """ use VaeImageProcessor.preprocess instead""",
        FutureWarning,
    )
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]
    if isinstance(image[0], PIL.Image.Image):
        w, h = image[0].size
        w, h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["""lanczos"""]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def _preprocess_mask(mask):
    if isinstance(mask, torch.Tensor):
        return mask
    elif isinstance(mask, PIL.Image.Image):
        mask = [mask]
    if isinstance(mask[0], PIL.Image.Image):
        w, h = mask[0].size
        w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
        mask = [np.array(m.convert("""L""").resize((w, h), resample=PIL_INTERPOLATION["""nearest"""]))[None, :] for m in mask]
        mask = np.concatenate(mask, axis=0)
        mask = mask.astype(np.float32) / 255.0
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask)
    elif isinstance(mask[0], torch.Tensor):
        mask = torch.cat(mask, dim=0)
    return mask
class RePaintPipeline(DiffusionPipeline):
    unet: UNetaDModel
    scheduler: RePaintScheduler

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(self, image, mask_image, num_inference_steps = 2_50, eta = 0.0, jump_length = 10, jump_n_sample = 10, generator = None, output_type = "pil", return_dict = True):
        original_image = image
        original_image = _preprocess_image(original_image)
        original_image = original_image.to(device=self.device, dtype=self.unet.dtype)
        mask_image = _preprocess_mask(mask_image)
        mask_image = mask_image.to(device=self.device, dtype=self.unet.dtype)
        batch_size = original_image.shape[0]
        # sample gaussian noise to begin the loop
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f'You have passed a list of generators of length {len(generator)}, but requested an effective batch'
                f' size of {batch_size}. Make sure the batch size matches the length of the generators.')
        image_shape = original_image.shape
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)
        # set step values
        self.scheduler.set_timesteps(num_inference_steps, jump_length, jump_n_sample, self.device)
        self.scheduler.eta = eta
        t_last = self.scheduler.timesteps[0] + 1
        generator = generator[0] if isinstance(generator, list) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            if t < t_last:
                # predict the noise residual
                model_output = self.unet(image, t).sample
                # compute previous image: x_t -> x_t-1
                image = self.scheduler.step(model_output, t, image, original_image, mask_image, generator).prev_sample
            else:
                # compute the reverse: x_t-1 -> x_t
                image = self.scheduler.undo_step(image, t_last, generator)
            t_last = t
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
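# Usage sketch (hedged: the checkpoint name follows the diffusers RePaint
# example and is an assumption here; `init_image` / `mask` are PIL images):
#
#   from diffusers import UNet2DModel, RePaintScheduler
#   scheduler = RePaintScheduler.from_pretrained("google/ddpm-ema-celebahq-256")
#   unet = UNet2DModel.from_pretrained("google/ddpm-ema-celebahq-256")
#   pipe = RePaintPipeline(unet=unet, scheduler=scheduler)
#   out = pipe(image=init_image, mask_image=mask, num_inference_steps=250)
#   inpainted = out.images[0]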
| 689
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
[
"""num_inference_steps""",
"""generator""",
"""latents""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64, 64, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D"""), up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D"""), cross_attention_dim=32, attention_head_dim=4, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="""scaled_linear""", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""], up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""], latent_channels=4, sample_size=1_28, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=10_00, hidden_act="""gelu""", projection_dim=5_12, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""")
        components = {
            """unet""": unet,
            """scheduler""": scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("""mps"""):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """generator""": generator,
            """num_inference_steps""": 2,
            """guidance_scale""": 6.0,
            """output_type""": """pt""",
        }
        return inputs
    def test_text_to_video_default_case(self):
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs["""output_type"""] = """np"""
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]
        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False, expected_max_diff=3e-3)
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available(),reason="""XFormers attention is only available with CUDA and `xformers` installed""",)
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=1e-2)
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def lowerCamelCase__ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def lowerCamelCase__ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" )
def lowerCamelCase__ ( self ):
'''simple docstring'''
pass
    def test_progress_bar(self):
        return super().test_progress_bar()
@slow
@skip_mps
class TextToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_full_model(self):
        expected_video = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy""")
        pipe = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe = pipe.to("""cuda""")
        prompt = """Spiderman is surfing"""
        generator = torch.Generator(device="""cpu""").manual_seed(0)
        video_frames = pipe(prompt, generator=generator, num_inference_steps=25, output_type="""pt""").frames
        video = video_frames.cpu().numpy()
        assert np.abs(expected_video - video).mean() < 5e-2

    def test_two_step_model(self):
        expected_video = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy""")
        pipe = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""")
        pipe = pipe.to("""cuda""")
        prompt = """Spiderman is surfing"""
        generator = torch.Generator(device="""cpu""").manual_seed(0)
        video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="""pt""").frames
        video = video_frames.cpu().numpy()
        assert np.abs(expected_video - video).mean() < 5e-2
| 689
| 1
|
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
    def __init__(self, parent, d_model=16, batch_size=13, prediction_length=7, context_length=14, label_length=10, cardinality=19, embedding_dimension=5, num_attention_heads=4, is_training=True, hidden_size=16, num_hidden_layers=2, num_time_features=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, lags_sequence=[1, 2, 3, 4, 5], moving_average=25, autocorrelation_factor=5, ):
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
    def get_config(self):
        return AutoformerConfig(
            d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )

    def prepare_autoformer_inputs_dict(self, config):
        _past_length = config.context_length + max(config.lags_sequence)
        static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0])
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        past_values = floats_tensor([self.batch_size, _past_length])
        past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5
        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        future_values = floats_tensor([self.batch_size, config.prediction_length])
        inputs_dict = {
            'past_values': past_values,
            'static_categorical_features': static_categorical_features,
            'past_time_features': past_time_features,
            'past_observed_mask': past_observed_mask,
            'future_time_features': future_time_features,
            'future_values': future_values,
        }
        return inputs_dict

    def prepare_config_and_inputs(self):
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = AutoformerModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)
        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state
        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = AutoformerEncoder.from_pretrained(tmpdirname).to(torch_device)
        transformer_inputs, feature, _, _, _ = model.create_network_inputs(**inputs_dict)
        seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...])
        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
        encoder_last_hidden_state_a = encoder(inputs_embeds=enc_input)[0]
        self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3)
        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1)
            .unsqueeze(1)
            .repeat(1 , config.prediction_length , 1)
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ) , dim=-1 , )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ) , dim=-1 , )
        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = AutoformerDecoder.from_pretrained(tmpdirname).to(torch_device)
        last_hidden_state_a = decoder(
            trend=trend_init , inputs_embeds=dec_input , encoder_hidden_states=encoder_last_hidden_state , )[0]
        self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {'feature-extraction': AutoformerModel} if is_torch_available() else {}
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False

    def setUp(self):
        self.model_tester = AutoformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AutoformerConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info['missing_keys'], [])
    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)
@unittest.skip(reason='Model has no tokens embeddings' )
    def test_resize_tokens_embeddings(self):
        pass
    def test_model_main_input_name(self):
        model_signature = inspect.signature(getattr(AutoformerModel, 'forward'))
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys())[1]
        self.assertEqual(AutoformerModel.main_input_name, observed_main_input_name)
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = [
'past_values',
'past_time_features',
'past_observed_mask',
'static_categorical_features',
'static_real_features',
'future_values',
'future_time_features',
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append('future_observed_mask' )
expected_arg_names.extend(
[
'decoder_attention_mask',
'head_mask',
'decoder_head_mask',
'cross_attn_head_mask',
'encoder_outputs',
'past_key_values',
'output_hidden_states',
'output_attentions',
'use_cache',
'return_dict',
] )
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        seq_length = getattr(self.model_tester, 'seq_length', None)
        decoder_seq_length = getattr(self.model_tester, 'decoder_seq_length', seq_length)
        encoder_seq_length = getattr(self.model_tester, 'encoder_seq_length', seq_length)
        d_model = getattr(self.model_tester, 'd_model', None)
        num_attention_heads = getattr(self.model_tester, 'num_attention_heads', None)
        dim = d_model // num_attention_heads
        for model_class in self.all_model_classes:
            inputs_dict['output_attentions'] = True
            inputs_dict['output_hidden_states'] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, dim], )
            out_len = len(outputs)
            correct_outlen = 7
            if "last_hidden_state" in outputs:
                correct_outlen += 1
            if "trend" in outputs:
                correct_outlen += 1
            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned
            if "loss" in outputs:
                correct_outlen += 1
            if "params" in outputs:
                correct_outlen += 1
            self.assertEqual(out_len, correct_outlen)
            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions, (list, tuple))
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, dim], )
            # cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions, (list, tuple))
            self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, dim], )
            # Check attention is always last and order is fine
            inputs_dict['output_attentions'] = True
            inputs_dict['output_hidden_states'] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + 2, len(outputs))
            self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, dim], )
@is_flaky()
    def test_retain_grad_hidden_states_attentions(self):
        super().test_retain_grad_hidden_states_attentions()
def prepare_batch(filename="train-batch.pt"):
    file = hf_hub_download(repo_id='hf-internal-testing/tourism-monthly-batch', filename=filename, repo_type='dataset')
    batch = torch.load(file, map_location=torch_device)
    return batch
@require_torch
@slow
class AutoformerModelIntegrationTests(unittest.TestCase):
    def test_inference_no_head(self):
        model = AutoformerModel.from_pretrained('huggingface/autoformer-tourism-monthly').to(torch_device)
        batch = prepare_batch()
        with torch.no_grad():
            output = model(
                past_values=batch['past_values'] , past_time_features=batch['past_time_features'] , past_observed_mask=batch['past_observed_mask'] , static_categorical_features=batch['static_categorical_features'] , future_values=batch['future_values'] , future_time_features=batch['future_time_features'] , )[0]
        expected_shape = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] , device=torch_device)
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))
    def test_inference_head(self):
        model = AutoformerForPrediction.from_pretrained('huggingface/autoformer-tourism-monthly').to(torch_device)
        batch = prepare_batch('val-batch.pt')
        with torch.no_grad():
            output = model(
                past_values=batch['past_values'] , past_time_features=batch['past_time_features'] , past_observed_mask=batch['past_observed_mask'] , static_categorical_features=batch['static_categorical_features'] , ).encoder_last_hidden_state
        expected_shape = torch.Size((64, model.config.context_length, model.config.d_model))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] , device=torch_device)
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))
    def test_seq_to_seq_generation(self):
        model = AutoformerForPrediction.from_pretrained('huggingface/autoformer-tourism-monthly').to(torch_device)
        batch = prepare_batch('val-batch.pt')
        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch['static_categorical_features'] , past_time_features=batch['past_time_features'] , past_values=batch['past_values'] , future_time_features=batch['future_time_features'] , past_observed_mask=batch['past_observed_mask'] , )
        expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length))
        self.assertEqual(outputs.sequences.shape, expected_shape)
        expected_slice = torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=torch_device)
        mean_prediction = outputs.sequences.mean(dim=1)
        self.assertTrue(torch.allclose(mean_prediction[0, -3:], expected_slice, rtol=1e-1))
| 144
|
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10, hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1], is_training=True, use_labels=True, hidden_act="relu", num_labels=3, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)
def UpperCAmelCase_ (self : List[str] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase_ : str = self.get_config()
return config, pixel_values
def UpperCAmelCase_ (self : List[str] ) -> Optional[int]:
"""simple docstring"""
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def UpperCAmelCase_ (self : Optional[Any] , _snake_case : Any , _snake_case : Any ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ : Optional[Any] = FlaxRegNetModel(config=_snake_case )
lowerCamelCase_ : str = model(_snake_case )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def UpperCAmelCase_ (self : Tuple , _snake_case : Any , _snake_case : Optional[Any] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ : int = self.num_labels
lowerCamelCase_ : int = FlaxRegNetForImageClassification(config=_snake_case )
lowerCamelCase_ : Any = model(_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase_ (self : Dict ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ : Optional[Any] = self.prepare_config_and_inputs()
lowerCamelCase_ , lowerCamelCase_ : int = config_and_inputs
lowerCamelCase_ : List[Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_flax
class lowerCamelCase__ ( UpperCAmelCase, unittest.TestCase ):
lowerCamelCase_ : Tuple = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
lowerCamelCase_ : int = False
lowerCamelCase_ : str = False
lowerCamelCase_ : Any = False
def UpperCAmelCase_ (self : Optional[Any] ) -> None:
"""simple docstring"""
lowerCamelCase_ : str = FlaxRegNetModelTester(self )
lowerCamelCase_ : Tuple = ConfigTester(self , config_class=_snake_case , has_text_modality=_snake_case )
def UpperCAmelCase_ (self : str ) -> Union[str, Any]:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase_ (self : int ) -> Optional[int]:
"""simple docstring"""
return
def UpperCAmelCase_ (self : int ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def UpperCAmelCase_ (self : Tuple ) -> Any:
"""simple docstring"""
lowerCamelCase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_snake_case )
@unittest.skip(reason='RegNet does not use inputs_embeds' )
def UpperCAmelCase_ (self : str ) -> int:
"""simple docstring"""
pass
@unittest.skip(reason='RegNet does not support input and output embeddings' )
def UpperCAmelCase_ (self : List[Any] ) -> List[str]:
"""simple docstring"""
pass
def UpperCAmelCase_ (self : Any ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ : Optional[Any] = model_class(_snake_case )
lowerCamelCase_ : int = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase_ : Dict = [*signature.parameters.keys()]
lowerCamelCase_ : Union[str, Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , _snake_case )
def UpperCAmelCase_ (self : Tuple ) -> List[str]:
"""simple docstring"""
def check_hidden_states_output(_snake_case : int , _snake_case : Dict , _snake_case : Optional[Any] ):
lowerCamelCase_ : Optional[Any] = model_class(_snake_case )
lowerCamelCase_ : Dict = model(**self._prepare_for_class(_snake_case , _snake_case ) )
lowerCamelCase_ : int = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCamelCase_ : Dict = self.model_tester.num_stages
self.assertEqual(len(_snake_case ) , expected_num_stages + 1 )
lowerCamelCase_ , lowerCamelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ : Dict = True
check_hidden_states_output(_snake_case , _snake_case , _snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase_ : Tuple = True
check_hidden_states_output(_snake_case , _snake_case , _snake_case )
def UpperCAmelCase_ (self : Any ) -> Dict:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCamelCase_ : Tuple = self._prepare_for_class(_snake_case , _snake_case )
lowerCamelCase_ : Dict = model_class(_snake_case )
@jax.jit
def model_jitted(_snake_case : Optional[Any] , **_snake_case : Tuple ):
return model(pixel_values=_snake_case , **_snake_case )
with self.subTest('JIT Enabled' ):
lowerCamelCase_ : str = model_jitted(**_snake_case ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
lowerCamelCase_ : List[Any] = model_jitted(**_snake_case ).to_tuple()
self.assertEqual(len(_snake_case ) , len(_snake_case ) )
for jitted_output, output in zip(_snake_case , _snake_case ):
self.assertEqual(jitted_output.shape , output.shape )
def _a ( ) -> str:
lowerCamelCase_ : Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_flax
class lowerCamelCase__ ( unittest.TestCase ):
@cached_property
def UpperCAmelCase_ (self : Optional[Any] ) -> List[str]:
"""simple docstring"""
return AutoImageProcessor.from_pretrained('facebook/regnet-y-040' ) if is_vision_available() else None
@slow
def UpperCAmelCase_ (self : Tuple ) -> str:
"""simple docstring"""
lowerCamelCase_ : Dict = FlaxRegNetForImageClassification.from_pretrained('facebook/regnet-y-040' )
lowerCamelCase_ : List[str] = self.default_image_processor
lowerCamelCase_ : str = prepare_img()
lowerCamelCase_ : int = image_processor(images=_snake_case , return_tensors='np' )
lowerCamelCase_ : Union[str, Any] = model(**_snake_case )
# verify the logits
lowerCamelCase_ : Any = (1, 1000)
self.assertEqual(outputs.logits.shape , _snake_case )
lowerCamelCase_ : Optional[int] = jnp.array([-0.4180, -1.5051, -3.4836] )
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , _snake_case , atol=1e-4 ) )
| 144
| 1
|
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(self, batch_size: int = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, eta: float = 0.0, num_inference_steps: int = 50, use_clipped_model_output: Optional[bool] = None, output_type: Optional[str] = "pil", return_dict: bool = True, ) -> Union[ImagePipelineOutput, Tuple]:
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f'You have passed a list of generators of length {len(generator)}, but requested an effective batch'
                f' size of {batch_size}. Make sure the batch size matches the length of the generators.')

        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
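# Hedged usage sketch (added; not part of the original file). The checkpoint id
# below is an assumption for illustration — any unconditional UNet checkpoint
# with a compatible scheduler config should work. Running this downloads weights.
if __name__ == "__main__":
    pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
    result = pipe(batch_size=2, num_inference_steps=50, eta=0.0)
    result.images[0].save("ddim_sample.png")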
| 669
|
'''simple docstring'''
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
    """A vector backed by a list of components."""

    def __init__(self, components=None):
        if components is None:
            components = []
        self.__components = list(components)

    def __len__(self):
        return len(self.__components)

    def __str__(self):
        return "(" + ",".join(map(str, self.__components)) + ")"

    def __add__(self, other):
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        else:
            raise Exception('must have the same size')

    def __sub__(self, other):
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        else:  # error case
            raise Exception('must have the same size')

    @overload
    def __mul__(self, other: float) -> "Vector":
        ...

    @overload
    def __mul__(self, other: "Vector") -> float:
        ...

    def __mul__(self, other):
        if isinstance(other, (float, int)):
            ans = [c * other for c in self.__components]
            return Vector(ans)
        elif isinstance(other, Vector) and len(self) == len(other):
            size = len(self)
            prods = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(prods)
        else:  # error case
            raise Exception('invalid operand!')

    def copy(self):
        return Vector(self.__components)

    def component(self, i):
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception('index out of range')

    def change_component(self, pos, value):
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value

    def euclidean_length(self):
        if len(self.__components) == 0:
            raise Exception('Vector is empty')
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))

    def angle(self, other, deg=False):
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)


def zero_vector(dimension):
    assert isinstance(dimension, int)
    return Vector([0] * dimension)


def unit_basis_vector(dimension, pos):
    assert isinstance(dimension, int) and (isinstance(pos, int))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)


def axpy(scalar, x, y):
    assert (
        isinstance(x, Vector)
        and isinstance(y, Vector)
        and (isinstance(scalar, (int, float)))
    )
    return x * scalar + y


def random_vector(n, a, b):
    random.seed(None)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)
class Matrix:
    """A matrix with a width, a height, and a list-of-lists of entries."""

    def __init__(self, matrix, w, h):
        self.__matrix = matrix
        self.__width = w
        self.__height = h

    def __str__(self):
        ans = ''
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans

    def __add__(self, other):
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception('matrix must have the same dimension!')

    def __sub__(self, other):
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception('matrices must have the same dimension!')

    @overload
    def __mul__(self, other: float) -> "Matrix":
        ...

    @overload
    def __mul__(self, other: Vector) -> Vector:
        ...

    def __mul__(self, other):
        if isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    prods = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(prods))
                return ans
            else:
                raise Exception(
                    'vector must have the same size as the '
                    'number of columns of the matrix!')
        elif isinstance(other, (int, float)):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        return None

    def height(self):
        return self.__height

    def width(self):
        return self.__width

    def component(self, x, y):
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception('change_component: indices out of bounds')

    def change_component(self, x, y, value):
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception('change_component: indices out of bounds')

    def minor(self, x, y):
        if self.__height != self.__width:
            raise Exception('Matrix is not square')
        minor = self.__matrix[:x] + self.__matrix[x + 1:]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1:]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()

    def cofactor(self, x, y):
        if self.__height != self.__width:
            raise Exception('Matrix is not square')
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception('Indices out of bounds')

    def determinant(self):
        if self.__height != self.__width:
            raise Exception('Matrix is not square')
        if self.__height < 1:
            raise Exception('Matrix has no element')
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactor_prods)


def square_zero_matrix(n):
    ans = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)


def random_matrix(width, height, a, b):
    random.seed(None)
    matrix = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
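# Minimal demo (added for illustration): exercises vector arithmetic, the dot
# product, and the Laplace-expansion determinant defined above.
if __name__ == "__main__":
    u = Vector([1, 2, 3])
    v = Vector([1, 1, 1])
    print(u + v)                  # (2,3,4)
    print(u * v)                  # dot product: 6
    print(u.euclidean_length())   # sqrt(14) ~ 3.742
    m = Matrix([[1, 2], [3, 4]], 2, 2)
    print(m.determinant())        # 1*4 - 2*3 = -2
    print(m * Vector([1, 1]))     # (3,7)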
| 189
| 0
|
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    frames: Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 190
|
"""simple docstring"""
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class FunnelTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = FunnelTokenizer
    rust_tokenizer_class = FunnelTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()
        vocab_tokens = [
            '<unk>',
            '<cls>',
            '<sep>',
            'want',
            '##want',
            '##ed',
            'wa',
            'un',
            'runn',
            '##ing',
            ',',
            'low',
            'lowest',
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return FunnelTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = 'UNwant\u00E9d,running'
        output_text = 'unwanted, running'
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize('UNwant\u00E9d,running')
        self.assertListEqual(tokens, ['un', '##want', '##ed', ',', 'runn', '##ing'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_token_type_ids(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            inputs = tokenizer('UNwant\u00E9d,running')
            sentence_len = len(inputs['input_ids']) - 1
            self.assertListEqual(inputs['token_type_ids'], [2] + [0] * sentence_len)

            inputs = tokenizer('UNwant\u00E9d,running', 'UNwant\u00E9d,running')
            self.assertListEqual(inputs['token_type_ids'], [2] + [0] * sentence_len + [1] * sentence_len)
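# Illustration (added): Funnel assigns token type 2 to the leading <cls> token,
# unlike BERT's 0 — which is what test_token_type_ids asserts above. A
# hypothetical spot-check against the hub checkpoint "funnel-transformer/small"
# (checkpoint id is an assumption) would look like:
#
#   tok = FunnelTokenizer.from_pretrained("funnel-transformer/small")
#   print(tok("hello")["token_type_ids"])  # -> [2, 0, 0]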
| 190
| 1
|
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)


def simple_accuracy(preds, labels):
    return (preds == labels).mean()


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'})
    cache_dir: Optional[str] = field(
        default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'}, )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    task_name: str = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(processors.keys())})
    data_dir: str = field(metadata={'help': 'Should contain the data files for the task.'})
    max_seq_length: int = field(
        default=128, metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'})


def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            ' --overwrite_output_dir to overcome.')

    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, )
    logger.warning(
        'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s', training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1), training_args.fp16, )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info('Training/evaluation parameters %s', training_args)

    # Set seed
    set_seed(training_args.seed)

    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError('Task not found: %s' % (data_args.task_name))

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path, from_tf=bool('.ckpt' in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, )

    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir, tokenizer=tokenizer, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train, )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir, tokenizer=tokenizer, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev, )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_metrics, data_collator=data_collator, )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None)
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***')
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, 'eval_results.txt')
        if trainer.is_world_master():
            with open(output_eval_file, 'w') as writer:
                logger.info('***** Eval results *****')
                for key, value in result.items():
                    logger.info('  %s = %s', key, value)
                    writer.write('%s = %s\n' % (key, value))
                results.update(result)
    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
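# Example invocation (added for illustration; assumes this script is saved as
# run_multiple_choice.py and that a SWAG-formatted dataset lives in ./swag_data):
#
#   python run_multiple_choice.py \
#       --task_name swag \
#       --model_name_or_path bert-base-cased \
#       --data_dir ./swag_data \
#       --output_dir ./swag_out \
#       --max_seq_length 80 \
#       --per_device_train_batch_size 16 \
#       --do_train --do_eval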
| 28
|
'''simple docstring'''
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
logger = logging.get_logger(__name__)


class MultiControlNetModel(ModelMixin):
    r"""
    Multiple `ControlNetModel` wrapper class for Multi-ControlNet.
    """

    def __init__(self, controlnets):
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(self, sample, timestep, encoder_hidden_states, controlnet_cond, conditioning_scale, class_labels=None, timestep_cond=None, attention_mask=None, cross_attention_kwargs=None, guess_mode=False, return_dict=True, ):
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample, timestep, encoder_hidden_states, image, scale, class_labels, timestep_cond, attention_mask, cross_attention_kwargs, guess_mode, return_dict, )

            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(self, save_directory, is_main_process=True, save_function=None, safe_serialization=False, variant=None, ):
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save, is_main_process=is_main_process, save_function=save_function, safe_serialization=safe_serialization, variant=variant, )
            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"

    @classmethod
    def from_pretrained(cls, pretrained_model_path, **kwargs):
        idx = 0
        controlnets = []
        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)
            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"

        logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")

        if len(controlnets) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + '_0'}.")

        return cls(controlnets)
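# Hedged usage sketch (added): composing two ControlNets into one module. The
# checkpoint ids are illustrative assumptions; forward() then expects a list of
# conditioning images and one scale per net.
#
#   from diffusers import ControlNetModel
#   pose = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose")
#   canny = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
#   multi = MultiControlNetModel([pose, canny])
#   multi.save_pretrained("./multi_controlnet")  # writes ./multi_controlnet and ./multi_controlnet_1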
| 28
| 1
|
import numpy as np
SQUARE = [
    ['a', 'b', 'c', 'd', 'e'],
    ['f', 'g', 'h', 'i', 'k'],
    ['l', 'm', 'n', 'o', 'p'],
    ['q', 'r', 's', 't', 'u'],
    ['v', 'w', 'x', 'y', 'z'],
]


class BifidCipher:
    def __init__(self) -> None:
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter: str) -> np.ndarray:
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1: int, index2: int) -> str:
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter

    def encode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(' ', '')
        message = message.replace('j', 'i')

        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]

        second_step = first_step.reshape(2 * len(message))
        encoded_message = ''
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter

        return encoded_message

    def decode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(' ', '')  # fixed: the original discarded this result

        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]

        second_step = first_step.reshape((2, len(message)))
        decoded_message = ''
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter

        return decoded_message
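# Round-trip demo (added for illustration): decode() inverts encode() for any
# lowercase, space-free message without the letter j.
if __name__ == "__main__":
    cipher = BifidCipher()
    secret = cipher.encode('testmessage')
    print(secret)
    print(cipher.decode(secret))  # -> 'testmessage'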
| 161
|
from typing import Any
def mode(input_list: list) -> list[Any]:
    """Return every value tied for the highest count in input_list, sorted ascending."""
    if not input_list:
        return []
    result = [input_list.count(value) for value in input_list]
    y = max(result)  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result) if value == y})
if __name__ == "__main__":
import doctest
doctest.testmod()
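# Worked examples (added):
#   mode([2, 3, 4, 5, 3, 4, 2])  -> [2, 3, 4]   (each of 2, 3, 4 appears twice)
#   mode([1, 1, 2])              -> [1]
#   mode([])                     -> []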
| 161
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json""",
"""google/bigbird-roberta-large""": """https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json""",
"""google/bigbird-base-trivia-itc""": """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json""",
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class BigBirdConfig(PretrainedConfig):
    model_type = "big_bird"

    def __init__(self, vocab_size=50358, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu_new", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=4096, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, use_cache=True, pad_token_id=0, bos_token_id=1, eos_token_id=2, sep_token_id=66, attention_type="block_sparse", use_bias=True, rescale_embeddings=False, block_size=64, num_random_blocks=3, classifier_dropout=None, **kwargs, ):
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, sep_token_id=sep_token_id, **kwargs, )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout


class BigBirdOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ])
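# Illustrative sketch (added): constructing the config and inspecting the ONNX
# input axes. Assumes the base OnnxConfig constructor signature
# (config, task="default").
if __name__ == "__main__":
    config = BigBirdConfig(attention_type="original_full", num_hidden_layers=2)
    print(config.model_type, config.block_size, config.num_random_blocks)
    onnx_config = BigBirdOnnxConfig(config, task="multiple-choice")
    print(onnx_config.inputs)  # input_ids / attention_mask with batch, choice, sequence axes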
| 355
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class GenericTester(unittest.TestCase):
    def test_flatten_dict(self):
        input_dict = {
            'task_specific_params': {
                'summarization': {'length_penalty': 1.0, 'max_length': 128, 'min_length': 12, 'num_beams': 4},
                'summarization_cnn': {'length_penalty': 2.0, 'max_length': 142, 'min_length': 56, 'num_beams': 4},
                'summarization_xsum': {'length_penalty': 1.0, 'max_length': 62, 'min_length': 11, 'num_beams': 6},
            }
        }
        expected_dict = {
            'task_specific_params.summarization.length_penalty': 1.0,
            'task_specific_params.summarization.max_length': 128,
            'task_specific_params.summarization.min_length': 12,
            'task_specific_params.summarization.num_beams': 4,
            'task_specific_params.summarization_cnn.length_penalty': 2.0,
            'task_specific_params.summarization_cnn.max_length': 142,
            'task_specific_params.summarization_cnn.min_length': 56,
            'task_specific_params.summarization_cnn.num_beams': 4,
            'task_specific_params.summarization_xsum.length_penalty': 1.0,
            'task_specific_params.summarization_xsum.max_length': 62,
            'task_specific_params.summarization_xsum.min_length': 11,
            'task_specific_params.summarization_xsum.num_beams': 6,
        }
        self.assertEqual(flatten_dict(input_dict), expected_dict)

    def test_transpose_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def test_transpose_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))

    def test_reshape_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))

    def test_squeeze_numpy(self):
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))

        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))

        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))

    def test_expand_dims_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))

    @require_torch
    def test_expand_dims_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_tf
    def test_expand_dims_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_flax
    def test_expand_dims_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
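# Quick illustration (added) of the behavior test_flatten_dict exercises above:
# nested keys are joined with "." into a single flat mapping, e.g.
#   flatten_dict({"a": {"b": 1, "c": {"d": 2}}})  ->  {"a.b": 1, "a.c.d": 2}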
| 355
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_bartpho'] = ['BartphoTokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 703
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 567
| 0
|
'''simple docstring'''
def is_palindrome(num: int) -> bool:
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
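# Worked examples (added): the loop peels decimal digits off `num` and rebuilds
# them in reverse order in `rev_num`.
#   is_palindrome(121)  -> True
#   is_palindrome(-121) -> False  (the negative sign breaks the symmetry)
#   is_palindrome(10)   -> False  (reverses to 1)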
| 526
|
'''simple docstring'''
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    return 1 / (1 + np.exp(-vector))


def gaussian_error_linear_unit(vector: np.ndarray) -> np.ndarray:
    return vector * sigmoid(1.702 * vector)
if __name__ == "__main__":
import doctest
doctest.testmod()
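# Quick numeric check (added): the 1.702-scaled sigmoid gate is the standard
# sigmoid approximation of GELU.
if __name__ == "__main__":
    values = np.array([-1.0, 0.0, 1.0])
    print(sigmoid(values))                     # ~[0.269, 0.5, 0.731]
    print(gaussian_error_linear_unit(values))  # ~[-0.154, 0.0, 0.846]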
| 526
| 1
|
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
TRANSFORMERS_PATH = 'src/transformers'
PATH_TO_DOCS = 'docs/source/en'
REPO_PATH = '.'
def _find_text_in_file(filename, start_prompt, end_prompt):
    with open(filename, 'r', encoding='utf-8', newline='\n') as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
ALLOWED_MODEL_SUFFIXES = 'Model|Encoder|Decoder|ForConditionalGeneration'
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r'TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
_re_flax_models = re.compile(r'Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r'(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
def camel_case_split(identifier):
    matches = re.finditer('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)', identifier)
    return [m.group(0) for m in matches]


def _center_text(text, width):
    text_length = 2 if text == '✅' or text == '❌' else len(text)
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent
def get_model_table_from_auto_modules():
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    model_name_to_prefix = {name: config.replace('Config', '') for name, config in model_name_to_config.items()}

    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool)
    fast_tokenizers = collections.defaultdict(bool)
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once).
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if attr_name.endswith('Tokenizer'):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith('TokenizerFast'):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = ''.join(camel_case_split(attr_name)[:-1])

    # Let's build that table!
    model_names = list(model_name_to_config.keys())
    model_names.sort(key=str.lower)
    columns = ['Model', 'Tokenizer slow', 'Tokenizer fast', 'PyTorch support', 'TensorFlow support', 'Flax Support']
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c) + 2 for c in columns]
    widths[0] = max([len(name) for name in model_names]) + 2

    # Build the table per se
    table = '|' + '|'.join([_center_text(c, w) for c, w in zip(columns, widths)]) + '|\n'
    # Use ":-----:" format to center-aligned table cell texts
    table += "|" + "|".join([':' + '-' * (w - 2) + ':' for w in widths]) + "|\n"

    check = {True: '✅', False: '❌'}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n"
    return table
def check_model_table(overwrite=False):
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, 'index.md'), start_prompt='<!--This table is updated automatically from the auto modules', end_prompt='<!-- End table-->', )
    new_table = get_model_table_from_auto_modules()

    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS, 'index.md'), 'w', encoding='utf-8', newline='\n') as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
        else:
            raise ValueError(
                'The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.')


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()

    check_model_table(args.fix_and_overwrite)
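# Typical invocations (added for illustration), run from the repository root:
#   python utils/check_table.py                      # fail if the doc table is stale
#   python utils/check_table.py --fix_and_overwrite  # rewrite docs/source/en/index.md in place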
| 713
|
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""")) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""")
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 650, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
},
{
"""framework""": """pytorch""",
"""script""": """run_ddp.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 600, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
},
{
"""framework""": """tensorflow""",
"""script""": """run_tf_dist.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 600, """eval_accuracy""": 0.6, """eval_loss""": 0.7},
},
])
class lowercase__ ( unittest.TestCase):
def __A ( self : Any ):
'''simple docstring'''
if self.framework == "pytorch":
subprocess.run(
f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='''utf-8''' , check=UpperCamelCase__ , )
assert hasattr(self , '''env''' )
def __A ( self : str , UpperCamelCase__ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = f"""{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"""
# distributed data settings
SCREAMING_SNAKE_CASE : Any = {'''smdistributed''': {'''dataparallel''': {'''enabled''': True}}} if self.script != '''run_ddp.py''' else None
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=UpperCamelCase__ , instance_count=UpperCamelCase__ , instance_type=self.instance_type , debugger_hook_config=UpperCamelCase__ , hyperparameters={**self.env.distributed_hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=UpperCamelCase__ , py_version='''py36''' , )
def __A ( self : Optional[Any] , UpperCamelCase__ : List[str] ):
'''simple docstring'''
TrainingJobAnalytics(UpperCamelCase__ ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(2,)] )
def test_script(self , instance_count ):
'''simple docstring'''
estimator = self.create_estimator(instance_count )
# run training
estimator.fit()
# result dataframe
result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
eval_loss = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
train_runtime = (
Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 99_9999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f"""{estimator.latest_training_job.name}.json""" , '''w''' ) as outfile:
json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , outfile )
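The `@parameterized_class` decorator above turns each dict into class attributes (`self.framework`, `self.script`, `self.results`, ...) for a separate copy of the test case. Below is a simplified sketch of that attribute injection for a single parameter set; treat it as an illustration, since the real decorator also generates one registered subclass per dict.

import unittest

def with_params(params):
    # copy each key/value pair onto the class as an attribute
    def decorator(cls):
        for key, value in params.items():
            setattr(cls, key, value)
        return cls
    return decorator

@with_params({"framework": "pytorch", "script": "run_ddp.py", "results": {"train_runtime": 600}})
class ExampleTest(unittest.TestCase):
    def test_attributes_were_injected(self):
        self.assertEqual(self.framework, "pytorch")
        self.assertIn("ddp", self.script)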
| 34
| 0
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester:
def __init__(self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , projection_dim=0 , ):
"""simple docstring"""
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
self.projection_dim = projection_dim
def prepare_config_and_inputs(self ):
"""simple docstring"""
input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
input_mask = None
if self.use_input_mask:
# follow test_modeling_tf_ctrl.py
input_mask = random_attention_mask([self.batch_size, self.seq_length] )
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
choice_labels = ids_tensor([self.batch_size] , self.num_choices )
config = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
config = DPRConfig(projection_dim=self.projection_dim , **config.to_dict() )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def create_and_check_dpr_context_encoder(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
"""simple docstring"""
model = TFDPRContextEncoder(config=config )
result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
result = model(input_ids , token_type_ids=token_type_ids )
result = model(input_ids )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def create_and_check_dpr_question_encoder(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
"""simple docstring"""
model = TFDPRQuestionEncoder(config=config )
result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
result = model(input_ids , token_type_ids=token_type_ids )
result = model(input_ids )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def create_and_check_dpr_reader(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
"""simple docstring"""
model = TFDPRReader(config=config )
result = model(input_ids , attention_mask=input_mask )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) )
def prepare_config_and_inputs_for_common(self ):
"""simple docstring"""
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {'''input_ids''': input_ids}
return config, inputs_dict
@require_tf
class TFDPRModelTest(TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
all_model_classes = (
(
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
if is_tf_available()
else ()
)
pipeline_model_mapping = {'''feature-extraction''': TFDPRQuestionEncoder} if is_tf_available() else {}
test_resize_embeddings = False
test_missing_keys = False
test_pruning = False
test_head_masking = False
test_onnx = False
def setUp(self ):
"""simple docstring"""
self.model_tester = TFDPRModelTester(self )
self.config_tester = ConfigTester(self , config_class=DPRConfig , hidden_size=37 )
def test_config(self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def test_dpr_context_encoder_model(self ):
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs )
def test_dpr_question_encoder_model(self ):
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs )
def test_dpr_reader_model(self ):
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_reader(*config_and_inputs )
@slow
def test_model_from_pretrained(self ):
"""simple docstring"""
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = TFDPRContextEncoder.from_pretrained(model_name )
self.assertIsNotNone(model )
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = TFDPRContextEncoder.from_pretrained(model_name )
self.assertIsNotNone(model )
for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = TFDPRQuestionEncoder.from_pretrained(model_name )
self.assertIsNotNone(model )
for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = TFDPRReader.from_pretrained(model_name )
self.assertIsNotNone(model )
@require_tf
class TFDPRModelIntegrationTest(unittest.TestCase ):
@slow
def test_inference_no_head(self ):
"""simple docstring"""
model = TFDPRQuestionEncoder.from_pretrained('''facebook/dpr-question_encoder-single-nq-base''' )
input_ids = tf.constant(
[[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]] ) # [CLS] hello, is my dog cute? [SEP]
output = model(input_ids )[0] # embedding shape = (1, 768)
# compare the actual values for a slice.
expected_slice = tf.constant(
[
[
0.03236253,
0.12753335,
0.16818509,
0.00279786,
0.3896933,
0.24264945,
0.2178971,
-0.02335227,
-0.08481959,
-0.14324117,
]
] )
self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1E-4 ) )
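The tester above leans on the shared `ids_tensor` and `random_attention_mask` helpers. A hedged sketch of what they do, assuming simplified versions of the common test utilities rather than their exact implementations:

import tensorflow as tf

def ids_tensor(shape, vocab_size):
    # random integer ids in [0, vocab_size)
    return tf.random.uniform(shape, minval=0, maxval=vocab_size, dtype=tf.int32)

def random_attention_mask(shape):
    mask = ids_tensor(shape, vocab_size=2)
    # force at least one attended token per row, as the real helper does
    return tf.concat([mask[:, :-1], tf.ones_like(mask[:, -1:])], axis=-1)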
| 103
|
'''simple docstring'''
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class RegNetModelTester:
'''simple docstring'''
def __init__(self , parent , batch_size=3 , image_size=32 , num_channels=3 , embeddings_size=10 , hidden_sizes=[10, 20, 30, 40] , depths=[1, 1, 2, 1] , is_training=True , use_labels=True , hidden_act="relu" , num_labels=3 , scope=None , ):
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.num_channels = num_channels
self.embeddings_size = embeddings_size
self.hidden_sizes = hidden_sizes
self.depths = depths
self.is_training = is_training
self.use_labels = use_labels
self.hidden_act = hidden_act
self.num_labels = num_labels
self.scope = scope
self.num_stages = len(hidden_sizes )
def prepare_config_and_inputs(self ):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
labels = None
if self.use_labels:
labels = ids_tensor([self.batch_size] , self.num_labels )
config = self.get_config()
return config, pixel_values, labels
def get_config(self ):
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def create_and_check_model(self , config , pixel_values , labels ):
model = RegNetModel(config=config )
model.to(torch_device )
model.eval()
result = model(pixel_values )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def create_and_check_for_image_classification(self , config , pixel_values , labels ):
config.num_labels = self.num_labels
model = RegNetForImageClassification(config )
model.to(torch_device )
model.eval()
result = model(pixel_values , labels=labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def prepare_config_and_inputs_for_common(self ):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values, labels = config_and_inputs
inputs_dict = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class RegNetModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
all_model_classes = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
pipeline_model_mapping = (
{"""feature-extraction""": RegNetModel, """image-classification""": RegNetForImageClassification}
if is_torch_available()
else {}
)
fx_compatible = False
test_pruning = False
test_resize_embeddings = False
test_head_masking = False
def setUp(self ):
self.model_tester = RegNetModelTester(self )
self.config_tester = ConfigTester(self , config_class=RegNetConfig , has_text_modality=False )
def test_config(self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def create_and_test_config_common_properties(self ):
return
@unittest.skip(reason="""RegNet does not use inputs_embeds""" )
def test_inputs_embeds(self ):
pass
@unittest.skip(reason="""RegNet does not support input and output embeddings""" )
def test_model_common_attributes(self ):
pass
def test_forward_signature(self ):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config )
signature = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , expected_arg_names )
def test_model(self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs )
def test_initialization(self ):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config=config )
for name, module in model.named_modules():
if isinstance(module , (nn.BatchNorm2d, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
def test_hidden_states_output(self ):
def check_hidden_states_output(inputs_dict , config , model_class ):
model = model_class(config )
model.to(torch_device )
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
expected_num_stages = self.model_tester.num_stages
self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
layers_type = ["""basic""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
config.layer_type = layer_type
inputs_dict["""output_hidden_states"""] = True
check_hidden_states_output(inputs_dict , config , model_class )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict , config , model_class )
def test_for_image_classification(self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
def test_model_from_pretrained(self ):
for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = RegNetModel.from_pretrained(model_name )
self.assertIsNotNone(model )
def prepare_img():
'''simple docstring'''
image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class RegNetModelIntegrationTest(unittest.TestCase ):
'''simple docstring'''
@cached_property
def default_image_processor(self ):
return (
AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def test_inference_image_classification_head(self ):
model = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(torch_device )
image_processor = self.default_image_processor
image = prepare_img()
inputs = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
# forward pass
with torch.no_grad():
outputs = model(**inputs )
# verify the logits
expected_shape = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , expected_shape )
expected_slice = torch.tensor([-0.4180, -1.5051, -3.4836] ).to(torch_device )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
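As a follow-up to the integration test above, the logits can be turned into a human-readable prediction with an argmax and the config's id2label map. A hypothetical helper, assuming any image-classification checkpoint whose config carries an `id2label` mapping:

import torch

def predict_label(model, image_processor, image):
    inputs = image_processor(images=image, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    predicted_class_idx = logits.argmax(-1).item()  # index of the top class
    return model.config.id2label[predicted_class_idx]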
| 427
| 0
|
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase ):
@slow
def test_xlm_roberta_base(self ):
model = XLMRobertaModel.from_pretrained('xlm-roberta-base' )
input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]] )
# The dog is cute and lives in the garden house
expected_output_shape = torch.Size((1, 12, 768) ) # batch_size, sequence_length, embedding_vector_dim
expected_output_values_last_dim = torch.tensor(
[[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
output = model(input_ids )['last_hidden_state'].detach()
self.assertEqual(output.shape , expected_output_shape )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , expected_output_values_last_dim , atol=1E-3 ) )
@slow
def test_xlm_roberta_large(self ):
model = XLMRobertaModel.from_pretrained('xlm-roberta-large' )
input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]] )
# The dog is cute and lives in the garden house
expected_output_shape = torch.Size((1, 12, 1024) ) # batch_size, sequence_length, embedding_vector_dim
expected_output_values_last_dim = torch.tensor(
[[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
output = model(input_ids )['last_hidden_state'].detach()
self.assertEqual(output.shape , expected_output_shape )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , expected_output_values_last_dim , atol=1E-3 ) )
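The commented-out lines show how the reference values were produced. A runnable sketch of that reference computation, under the assumption that the `fairseq` package is installed and `torch.hub` has network access:

import torch

def fairseq_reference_last_dim(input_ids: torch.Tensor, variant: str = "xlmr.base") -> torch.Tensor:
    xlmr = torch.hub.load("pytorch/fairseq", variant)
    xlmr.eval()
    with torch.no_grad():
        features = xlmr.extract_features(input_ids[0])
    return features[:, :, -1]  # last embedding dimension for every position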
| 20
|
"""simple docstring"""
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaVaModelTester:
def __init__(self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , relative_attention=False , position_biased_input=True , pos_att_type="None" , num_labels=3 , num_choices=4 , scope=None , ):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.relative_attention = relative_attention
self.position_biased_input = position_biased_input
self.pos_att_type = pos_att_type
self.scope = scope
def prepare_config_and_inputs(self ):
input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
input_mask = None
if self.use_input_mask:
input_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
choice_labels = ids_tensor([self.batch_size] , self.num_choices )
config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def get_config(self ):
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def check_loss_output(self , result ):
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def create_and_check_deberta_model(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
model = DebertaVaModel(config=config )
model.to(torch_device )
model.eval()
sequence_output = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )[0]
sequence_output = model(input_ids , token_type_ids=token_type_ids )[0]
sequence_output = model(input_ids )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def create_and_check_deberta_for_masked_lm(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
model = DebertaVaForMaskedLM(config=config )
model.to(torch_device )
model.eval()
result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def create_and_check_deberta_for_sequence_classification(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
config.num_labels = self.num_labels
model = DebertaVaForSequenceClassification(config )
model.to(torch_device )
model.eval()
result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(result )
def create_and_check_deberta_for_token_classification(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
config.num_labels = self.num_labels
model = DebertaVaForTokenClassification(config=config )
model.to(torch_device )
model.eval()
result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def create_and_check_deberta_for_question_answering(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
model = DebertaVaForQuestionAnswering(config=config )
model.to(torch_device )
model.eval()
result = model(
input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def create_and_check_deberta_for_multiple_choice(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
model = DebertaVaForMultipleChoice(config=config )
model.to(torch_device )
model.eval()
multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
result = model(
multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def prepare_config_and_inputs_for_common(self ):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class DebertaVaModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
all_model_classes = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
"""feature-extraction""": DebertaVaModel,
"""fill-mask""": DebertaVaForMaskedLM,
"""question-answering""": DebertaVaForQuestionAnswering,
"""text-classification""": DebertaVaForSequenceClassification,
"""token-classification""": DebertaVaForTokenClassification,
"""zero-shot""": DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
fx_compatible = True
test_torchscript = False
test_pruning = False
test_head_masking = False
is_encoder_decoder = False
def setUp(self ):
self.model_tester = DebertaVaModelTester(self )
self.config_tester = ConfigTester(self , config_class=DebertaVaConfig , hidden_size=37 )
def test_config(self ):
self.config_tester.run_common_tests()
def test_deberta_model(self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*config_and_inputs )
def test_for_sequence_classification(self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs )
def test_for_masked_lm(self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs )
def test_for_question_answering(self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs )
def test_for_token_classification(self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs )
def test_for_multiple_choice(self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*config_and_inputs )
@slow
def test_model_from_pretrained(self ):
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = DebertaVaModel.from_pretrained(model_name )
self.assertIsNotNone(model )
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaVaModelIntegrationTest(unittest.TestCase ):
@unittest.skip(reason='Model not available yet' )
def test_inference_masked_lm(self ):
pass
@slow
def test_inference_no_head(self ):
model = DebertaVaModel.from_pretrained('microsoft/deberta-v2-xlarge' )
input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
output = model(input_ids , attention_mask=attention_mask )[0]
# compare the actual values for a slice.
expected_slice = torch.tensor(
[[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1E-4 ) , F'{output[:, 1:4, 1:4]}' )
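The multiple-choice check above uses a common expansion trick: `unsqueeze(1).expand(-1, num_choices, -1).contiguous()` replicates each sequence once per answer choice. A standalone illustration with illustrative shapes:

import torch

batch_size, seq_len, num_choices = 2, 5, 4
input_ids = torch.randint(0, 100, (batch_size, seq_len))
expanded = input_ids.unsqueeze(1).expand(-1, num_choices, -1).contiguous()
assert expanded.shape == (batch_size, num_choices, seq_len)
# every choice slot holds a copy of the original sequence
assert torch.equal(expanded[:, 0, :], input_ids)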
| 20
| 1
|
import socket
def main() -> None:
'''simple docstring'''
sock = socket.socket(socket.AF_INET , socket.SOCK_STREAM )
host = socket.gethostname()
port = 12312
sock.connect((host, port) )
sock.send(b'''Hello server!''' )
with open('''Received_file''' , '''wb''' ) as out_file:
print('''File opened''' )
print('''Receiving data...''' )
while True:
data = sock.recv(1024 )
if not data:
break
out_file.write(data )
print('''Successfully received the file''' )
sock.close()
print('''Connection closed''' )
if __name__ == "__main__":
main()
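The client above expects a peer listening on the same host and port that answers with file bytes. A hypothetical matching server sketch; the file name, port, and chunk size are assumptions chosen to mirror the client:

import socket

def serve_file(filename: str = "To_send_file", port: int = 12312) -> None:
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((socket.gethostname(), port))
    server.listen(1)
    conn, addr = server.accept()
    print(f"Connection from {addr}")
    conn.recv(1024)  # consume the client's greeting
    with open(filename, "rb") as in_file:
        while chunk := in_file.read(1024):
            conn.send(chunk)
    conn.close()  # closing signals EOF, which ends the client's recv loop
    server.close()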
| 462
|
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator , batch_size = 16 , model_name_or_path = "bert-base-cased" ):
'''simple docstring'''
tokenizer = AutoTokenizer.from_pretrained(model_name_or_path )
datasets = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(examples ):
# max_length=None => use the model max length (it's actually the default)
outputs = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=True , max_length=None )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
tokenized_datasets = datasets.map(
tokenize_function , batched=True , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=False )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
tokenized_datasets = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(examples ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(examples , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
return tokenizer.pad(examples , padding='''longest''' , return_tensors='''pt''' )
# Instantiate dataloaders.
train_dataloader = DataLoader(
tokenized_datasets['''train'''] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
eval_dataloader = DataLoader(
tokenized_datasets['''validation'''] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE )
return train_dataloader, eval_dataloader
def evaluation_loop(accelerator , model , eval_dataloader , metric ):
'''simple docstring'''
model.eval()
samples_seen = 0
for step, batch in enumerate(eval_dataloader ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
outputs = model(**batch )
predictions = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
predictions, references = accelerator.gather(
(predictions, batch['''labels''']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(eval_dataloader ) - 1:
predictions = predictions[: len(eval_dataloader.dataset ) - samples_seen]
references = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=predictions , references=references , )
eval_metric = metric.compute()
return eval_metric["accuracy"]
def training_function(config , args ):
'''simple docstring'''
accelerator = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lr = config['''lr''']
num_epochs = int(config['''num_epochs'''] )
seed = int(config['''seed'''] )
batch_size = int(config['''batch_size'''] )
model_name_or_path = args.model_name_or_path
set_seed(seed )
train_dataloader, eval_dataloader = get_dataloaders(accelerator , batch_size , model_name_or_path )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path , return_dict=True )
# Instantiate optimizer
optimizer_cls = (
AdamW
if accelerator.state.deepspeed_plugin is None
or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
optimizer = optimizer_cls(params=model.parameters() , lr=lr )
if accelerator.state.deepspeed_plugin is not None:
gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
'''gradient_accumulation_steps'''
]
else:
gradient_accumulation_steps = 1
max_training_steps = (len(train_dataloader ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
lr_scheduler = get_linear_schedule_with_warmup(
optimizer=optimizer , num_warmup_steps=0 , num_training_steps=max_training_steps , )
else:
lr_scheduler = DummyScheduler(optimizer , total_num_steps=max_training_steps , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
# We need to keep track of how many total steps we have iterated over
overall_step = 0
# We also need to keep track of the stating epoch so files are named properly
starting_epoch = 0
metric = evaluate.load('''glue''' , '''mrpc''' )
ending_epoch = num_epochs
if args.partial_train_epoch is not None:
ending_epoch = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
epoch_string = args.resume_from_checkpoint.split('''epoch_''' )[1]
state_epoch_num = ''''''
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
starting_epoch = int(state_epoch_num ) + 1
accuracy = evaluation_loop(accelerator , model , eval_dataloader , metric )
accelerator.print('''resumed checkpoint performance:''' , accuracy )
accelerator.print('''resumed checkpoint\'s scheduler\'s lr:''' , lr_scheduler.get_lr()[0] )
accelerator.print('''resumed optimizers\'s lr:''' , optimizer.param_groups[0]['''lr'''] )
with open(os.path.join(args.output_dir , f"state_{starting_epoch-1}.json" ) , '''r''' ) as f:
resumed_state = json.load(f )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
state = {}
for epoch in range(starting_epoch , ending_epoch ):
model.train()
for step, batch in enumerate(train_dataloader ):
outputs = model(**batch )
loss = outputs.loss
loss = loss / gradient_accumulation_steps
accelerator.backward(loss )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
output_dir = f"epoch_{epoch}"
output_dir = os.path.join(args.output_dir , output_dir )
accelerator.save_state(output_dir )
accuracy = evaluation_loop(accelerator , model , eval_dataloader , metric )
state['''accuracy'''] = accuracy
state['''lr'''] = lr_scheduler.get_lr()[0]
state['''optimizer_lr'''] = optimizer.param_groups[0]['''lr''']
state['''epoch'''] = epoch
state['''step'''] = overall_step
accelerator.print(f"epoch {epoch}:" , state )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , f"state_{epoch}.json" ) , '''w''' ) as f:
json.dump(state , f )
def main() -> None:
'''simple docstring'''
parser = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' )
parser.add_argument(
'''--model_name_or_path''' , type=str , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=False , )
parser.add_argument(
'''--output_dir''' , type=str , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , )
parser.add_argument(
'''--resume_from_checkpoint''' , type=str , default=None , help='''If the training should continue from a checkpoint folder.''' , )
parser.add_argument(
'''--partial_train_epoch''' , type=int , default=None , help='''If passed, the training will stop after this number of epochs.''' , )
parser.add_argument(
'''--num_epochs''' , type=int , default=2 , help='''Number of train epochs.''' , )
args = parser.parse_args()
config = {'''lr''': 2e-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
training_function(config , args )
if __name__ == "__main__":
main()
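Each epoch the script saves an accelerate checkpoint and a state_{epoch}.json snapshot that the resume path later verifies. A hedged sketch of inspecting those snapshots after a run; the output directory is an assumption matching whatever was passed as --output_dir:

import json
import os

def summarize_states(output_dir: str = ".") -> None:
    # print the accuracy and learning rate recorded at each epoch
    for name in sorted(os.listdir(output_dir)):
        if name.startswith("state_") and name.endswith(".json"):
            with open(os.path.join(output_dir, name)) as f:
                state = json.load(f)
            print(name, "accuracy:", state["accuracy"], "lr:", state["lr"])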
| 462
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"SCUT-DLVCLab/lilt-roberta-en-base": (
"https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
),
}
class LiltConfig(PretrainedConfig ):
'''simple docstring'''
model_type = "lilt"
def __init__(self , vocab_size=30_522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=0 , position_embedding_type="absolute" , classifier_dropout=None , channel_shrink_ratio=4 , max_2d_position_embeddings=1_024 , **kwargs , ):
'''simple docstring'''
super().__init__(pad_token_id=pad_token_id , **kwargs)
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.position_embedding_type = position_embedding_type
self.classifier_dropout = classifier_dropout
self.channel_shrink_ratio = channel_shrink_ratio
self.max_2d_position_embeddings = max_2d_position_embeddings
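A minimal usage sketch of the configuration class defined above; the values are illustrative defaults, not recommendations:

config = LiltConfig(channel_shrink_ratio=4, max_2d_position_embeddings=1_024)
print(config.model_type)   # "lilt"
print(config.hidden_size)  # 768 by default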
| 437
|
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester(ConfigTester ):
'''simple docstring'''
def create_and_test_config_common_properties(self ):
'''simple docstring'''
config = self.config_class(**self.inputs_dict)
self.parent.assertTrue(hasattr(config , "tf_padding"))
self.parent.assertTrue(hasattr(config , "depth_multiplier"))
class MobileNetVaModelTester:
'''simple docstring'''
def __init__(self , parent , batch_size=13 , num_channels=3 , image_size=32 , depth_multiplier=0.25 , depth_divisible_by=8 , min_depth=8 , expand_ratio=6 , output_stride=32 , first_layer_is_expansion=True , finegrained_output=True , tf_padding=True , hidden_act="relu6" , last_hidden_size=1_280 , classifier_dropout_prob=0.1 , initializer_range=0.02 , is_training=True , use_labels=True , num_labels=10 , scope=None , ):
'''simple docstring'''
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.image_size = image_size
self.depth_multiplier = depth_multiplier
self.depth_divisible_by = depth_divisible_by
self.min_depth = min_depth
self.expand_ratio = expand_ratio
self.tf_padding = tf_padding
self.output_stride = output_stride
self.first_layer_is_expansion = first_layer_is_expansion
self.finegrained_output = finegrained_output
self.hidden_act = hidden_act
self.last_hidden_size = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier)
self.classifier_dropout_prob = classifier_dropout_prob
self.use_labels = use_labels
self.is_training = is_training
self.num_labels = num_labels
self.initializer_range = initializer_range
self.scope = scope
def prepare_config_and_inputs(self ):
'''simple docstring'''
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
labels = None
pixel_labels = None
if self.use_labels:
labels = ids_tensor([self.batch_size] , self.num_labels)
pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels)
config = self.get_config()
return config, pixel_values, labels, pixel_labels
def get_config(self ):
'''simple docstring'''
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def create_and_check_model(self , config , pixel_values , labels , pixel_labels ):
'''simple docstring'''
model = MobileNetVaModel(config=config)
model.to(torch_device)
model.eval()
result = model(pixel_values)
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
self.parent.assertEqual(
result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , )
def create_and_check_for_image_classification(self , config , pixel_values , labels , pixel_labels ):
'''simple docstring'''
config.num_labels = self.num_labels
model = MobileNetVaForImageClassification(config)
model.to(torch_device)
model.eval()
result = model(pixel_values , labels=labels)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def create_and_check_for_semantic_segmentation(self , config , pixel_values , labels , pixel_labels ):
'''simple docstring'''
config.num_labels = self.num_labels
model = MobileNetVaForSemanticSegmentation(config)
model.to(torch_device)
model.eval()
result = model(pixel_values)
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
result = model(pixel_values , labels=pixel_labels)
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def prepare_config_and_inputs_for_common(self ):
'''simple docstring'''
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values, labels, pixel_labels = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class MobileNetVaModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
all_model_classes = (
(MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
"feature-extraction": MobileNetVaModel,
"image-classification": MobileNetVaForImageClassification,
"image-segmentation": MobileNetVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
test_pruning = False
test_resize_embeddings = False
test_head_masking = False
has_attentions = False
def setUp(self ):
'''simple docstring'''
self.model_tester = MobileNetVaModelTester(self)
self.config_tester = MobileNetVaConfigTester(self , config_class=MobileNetVaConfig , has_text_modality=False)
def test_config(self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="MobileNetV2 does not use inputs_embeds")
def test_inputs_embeds(self ):
'''simple docstring'''
pass
@unittest.skip(reason="MobileNetV2 does not support input and output embeddings")
def test_model_common_attributes(self ):
'''simple docstring'''
pass
@unittest.skip(reason="MobileNetV2 does not output attentions")
def test_attention_outputs(self ):
'''simple docstring'''
pass
def test_forward_signature(self ):
'''simple docstring'''
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ["pixel_values"]
self.assertListEqual(arg_names[:1] , expected_arg_names)
def test_model(self ):
'''simple docstring'''
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_hidden_states_output(self ):
'''simple docstring'''
def check_hidden_states_output(inputs_dict , config , model_class ):
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict , model_class))
hidden_states = outputs.hidden_states
expected_num_stages = 16
self.assertEqual(len(hidden_states) , expected_num_stages)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(inputs_dict , config , model_class)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict , config , model_class)
def test_for_image_classification(self ):
'''simple docstring'''
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
def test_for_semantic_segmentation(self ):
'''simple docstring'''
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
@slow
def test_model_from_pretrained(self ):
'''simple docstring'''
for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = MobileNetVaModel.from_pretrained(model_name)
self.assertIsNotNone(model)
def prepare_img():
"""simple docstring"""
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def SCREAMING_SNAKE_CASE_ (self : List[str]) ->List[str]:
'''simple docstring'''
return (
MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224") if is_vision_available() else None
)
@slow
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Union[str, Any]:
'''simple docstring'''
lowerCamelCase__: Union[str, Any] =MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224").to(UpperCAmelCase_)
lowerCamelCase__: Dict =self.default_image_processor
lowerCamelCase__: str =prepare_img()
lowerCamelCase__: int =image_processor(images=UpperCAmelCase_ , return_tensors="pt").to(UpperCAmelCase_)
# forward pass
with torch.no_grad():
lowerCamelCase__: str =model(**UpperCAmelCase_)
# verify the logits
lowerCamelCase__: Optional[Any] =torch.Size((1, 1_001))
self.assertEqual(outputs.logits.shape , UpperCAmelCase_)
lowerCamelCase__: List[str] =torch.tensor([0.2445, -1.1993, 0.1905]).to(UpperCAmelCase_)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase_ , atol=1E-4))
    @slow
    def test_inference_semantic_segmentation(self):
        '''simple docstring'''
        model = MobileNetVaForSemanticSegmentation.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")
        model = model.to(torch_device)

        image_processor = MobileNetVaImageProcessor.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 65, 65))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
                [[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
                [[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
            ],
            device=torch_device,
        )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
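# Note: both integration tests above are gated behind @slow. A sketch of how they
# are usually selected in a transformers checkout (test path is an assumption
# about the local layout, not fixed by this file):
#   RUN_SLOW=1 python -m pytest tests/models/mobilenet_v2 -k "integration"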
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_blenderbot_small": [
"BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotSmallConfig",
"BlenderbotSmallOnnxConfig",
],
"tokenization_blenderbot_small": ["BlenderbotSmallTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_blenderbot_small_fast"] = ["BlenderbotSmallTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot_small"] = [
"BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotSmallForCausalLM",
"BlenderbotSmallForConditionalGeneration",
"BlenderbotSmallModel",
"BlenderbotSmallPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot_small"] = [
"TFBlenderbotSmallForConditionalGeneration",
"TFBlenderbotSmallModel",
"TFBlenderbotSmallPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot_small"] = [
"FlaxBlenderbotSmallForConditionalGeneration",
"FlaxBlenderbotSmallModel",
"FlaxBlenderbotSmallPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
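# The _LazyModule registered above defers the heavy framework imports: submodules
# listed in _import_structure are only imported the first time one of their
# attributes (e.g. BlenderbotSmallModel) is actually accessed.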
"""simple docstring"""
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"nvidia/segformer-b0-finetuned-ade-512-512": (
"https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "segformer"

    def __init__(self, num_channels=3, num_encoder_blocks=4, depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1], hidden_sizes=[32, 64, 160, 256], patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2], num_attention_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4], hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, classifier_dropout_prob=0.1, initializer_range=0.02, drop_path_rate=0.1, layer_norm_eps=1e-6, decoder_hidden_size=256, semantic_loss_ignore_index=255, **kwargs) -> None:
        super().__init__(**kwargs)

        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.",
                FutureWarning,
            )

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class SegformerOnnxConfig(OnnxConfig):
    """simple docstring"""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
__author__ = "Alexander Joslin"
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation: str) -> int:
    """simple docstring"""
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}

    operand_stack = Stack()
    operator_stack = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2
            operator_stack.push(i)
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()

            total = operators[opr](num2, num1)
            operand_stack.push(total)
        # RULE 3: opening parentheses (and any other characters) require no action

    # RULE 5
    return operand_stack.peek()
if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
# answer = 45
print(f"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
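# Note: operands are read one character at a time via i.isdigit(), so this
# implementation only supports single-digit numbers; multi-digit input would
# need a tokenization pass before the two-stack evaluation.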
def binary_exponentiation(a, n, mod):
    """simple docstring"""
    if n == 0:
        return 1

    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod

    else:
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod


# a prime number
p = 701

a = 1000000000
b = 10
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
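# Both checks rely on Fermat's little theorem: for prime p and b not divisible
# by p, b ** (p - 2) is the modular inverse of b, so division by b modulo p is
# equivalent to multiplication by binary_exponentiation(b, p - 2, p).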
'''simple docstring'''
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping
def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    '''simple docstring'''
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])
    return None, "\n".join(full_content)
class DatasetMetadata(dict):
    # class attributes
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path):
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        full_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(full_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")
known_task_ids = {
'image-classification': [],
'translation': [],
'image-segmentation': [],
'fill-mask': [],
'automatic-speech-recognition': [],
'token-classification': [],
'sentence-similarity': [],
'audio-classification': [],
'question-answering': [],
'summarization': [],
'zero-shot-classification': [],
'table-to-text': [],
'feature-extraction': [],
'other': [],
'multiple-choice': [],
'text-classification': [],
'text-to-image': [],
'text2text-generation': [],
'zero-shot-image-classification': [],
'tabular-classification': [],
'tabular-regression': [],
'image-to-image': [],
'tabular-to-text': [],
'unconditional-image-generation': [],
'text-retrieval': [],
'text-to-speech': [],
'object-detection': [],
'audio-to-audio': [],
'text-generation': [],
'conversational': [],
'table-question-answering': [],
'visual-question-answering': [],
'image-to-text': [],
'reinforcement-learning': [],
'voice-activity-detection': [],
'time-series-forecasting': [],
'document-question-answering': [],
}
if __name__ == "__main__":
    from argparse import ArgumentParser

    ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
    ap.add_argument("readme_filepath")
    args = ap.parse_args()

    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
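# Minimal usage sketch (the README content below is a made-up example):
#
#   content = "---\npretty_name: Demo\n---\n# My dataset"
#   yaml_block, body = _split_yaml_from_readme(content)
#   metadata = DatasetMetadata.from_yaml_string(yaml_block)
#   print(metadata.to_yaml_string())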
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_pegasus_x': ['PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PegasusXConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pegasus_x"] = [
'PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST',
'PegasusXForConditionalGeneration',
'PegasusXModel',
'PegasusXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DPMSolverSinglestepSchedulerTest(SchedulerCommonTest):
    """simple docstring"""

    scheduler_classes = (DPMSolverSinglestepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        """simple docstring"""
        config = {
"""num_train_timesteps""": 10_00,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""solver_order""": 2,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
"""sample_max_value""": 1.0,
"""algorithm_type""": """dpmsolver++""",
"""solver_type""": """midpoint""",
"""lambda_min_clipped""": -float("""inf""" ),
"""variance_type""": None,
}
        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        """simple docstring"""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained(self):
        """simple docstring"""
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        """simple docstring"""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, scheduler=None, **config):
        """simple docstring"""
        # only build a default scheduler when none was passed in, so that
        # test_switch can exercise a caller-provided scheduler
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
    def test_full_uneven_loop(self):
        """simple docstring"""
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        num_inference_steps = 50
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        # make sure that the first t is uneven
        for i, t in enumerate(scheduler.timesteps[3:]):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2574) < 1e-3

    def test_timesteps(self):
        """simple docstring"""
        for timesteps in [25, 50, 1_00, 9_99, 10_00]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_switch(self):
        """simple docstring"""
        # make sure that iterating over schedulers with the same config gives the same results
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3

        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3
    def test_thresholding(self):
        """simple docstring"""
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["midpoint", "heun"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            algorithm_type="dpmsolver++",
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        """simple docstring"""
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        """simple docstring"""
        for algorithm_type in ["dpmsolver", "dpmsolver++"]:
            for solver_type in ["midpoint", "heun"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        """simple docstring"""
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)
    def test_lambda_min_clipped(self):
        """simple docstring"""
        self.check_over_configs(lambda_min_clipped=-float("inf"))
        self.check_over_configs(lambda_min_clipped=-5.1)

    def test_variance_type(self):
        """simple docstring"""
        self.check_over_configs(variance_type=None)
        self.check_over_configs(variance_type="learned_range")

    def test_inference_steps(self):
        """simple docstring"""
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 1_00, 9_99, 10_00]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        """simple docstring"""
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3
    def test_full_loop_with_karras(self):
        """simple docstring"""
        sample = self.full_loop(use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2248) < 1e-3

    def test_full_loop_with_v_prediction(self):
        """simple docstring"""
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.1453) < 1e-3

    def test_full_loop_with_karras_and_v_prediction(self):
        """simple docstring"""
        sample = self.full_loop(prediction_type="v_prediction", use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.0649) < 1e-3

    def test_fp16_support(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
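# The hard-coded means above (e.g. 0.2791) are regression values computed from
# the deterministic dummy model and sample that SchedulerCommonTest provides;
# any numerical change in the scheduler's update rule surfaces here as a
# tolerance failure rather than as an analytic check.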
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
"""simple docstring"""
    def __init__(self, *args, **kwargs):
        """simple docstring"""
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        """simple docstring"""
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params
    def __call__(self, image: Union["Image.Image", str], question: str = None, **kwargs):
        """simple docstring"""
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results
    def preprocess(self, inputs, padding=False, truncation=False):
        """simple docstring"""
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs
    def _forward(self, model_inputs):
        """simple docstring"""
        model_outputs = self.model(**model_inputs)
        return model_outputs
    def postprocess(self, model_outputs, top_k=5):
        """simple docstring"""
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
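# Minimal usage sketch (the checkpoint name is an assumption, not fixed by this
# file; any VQA model such as "dandelin/vilt-b32-finetuned-vqa" would do):
#
#   from transformers import pipeline
#   vqa = pipeline("visual-question-answering")
#   vqa(image="cats.png", question="How many cats are there?", top_k=2)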
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_bridgetower""": [
"""BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BridgeTowerConfig""",
"""BridgeTowerTextConfig""",
"""BridgeTowerVisionConfig""",
],
"""processing_bridgetower""": ["""BridgeTowerProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_bridgetower"] = ["BridgeTowerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bridgetower"] = [
"""BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BridgeTowerForContrastiveLearning""",
"""BridgeTowerForImageAndTextRetrieval""",
"""BridgeTowerForMaskedLM""",
"""BridgeTowerModel""",
"""BridgeTowerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_CITATION = """\
"""
_DESCRIPTION = """
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
"""
_KWARGS_DESCRIPTION = """
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to 'cuda' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]
>>> results = perplexity.compute(model_id='gpt2',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
78.22
>>> print(round(results[\"perplexities\"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = datasets.load_dataset(\"wikitext\",
... \"wikitext-2-raw-v1\",
... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!='']
>>> results = perplexity.compute(model_id='gpt2',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
60.35
>>> print(round(results[\"perplexities\"][0], 2))
81.12
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Perplexity(datasets.Metric):
    """simple docstring"""

    def _info(self):
        """simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""input_texts""": datasets.Value("""string""" ),
} ) , reference_urls=["""https://huggingface.co/docs/transformers/perplexity"""] , )
    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        """simple docstring"""
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]
        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            perplexity_batch = torch.exp2(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
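# Note: torch.exp2 exponentiates base 2, i.e. the masked cross-entropy above is
# interpreted in bits; a natural-log perplexity would use torch.exp instead.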
import math
class Graph:
    '''simple docstring'''

    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]
if __name__ == "__main__":
    graph = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
    print(graph.show_min(1, 4))  # expected: 11 (1 -> 3 -> 4)
    print(graph.show_min(0, 3))  # expected: 16 (0 -> 2 -> 3)
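# Floyd-Warshall runs in O(n^3) time and O(n^2) space; after floyd_warshall(),
# dp[u][v] holds the shortest-path weight from u to v (math.inf if unreachable).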
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    '''simple docstring'''

    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        # "\u0120" is the byte-level BPE marker for a leading space (Ġ).
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def longformer_dict_integration_testing(self):
        tokenizer = self.get_tokenizer()
        self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=True), [0, 3_1414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418", add_special_tokens=True),
            [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2],
        )
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()

        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)

        tokenizer.add_special_tokens({"bos_token": "<s>"})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)

        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)}
        )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)

        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"

        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)
    def test_pretokenized_inputs(self):
        pass
    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
            )

            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())

            self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["trim_offsets"], trim_offsets)
    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    '''simple docstring'''
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
        f""" {value.shape} for {full_name}"""
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""")
def recursively_load_weights_wavaveca(fairseq_model, hf_model):
    '''simple docstring'''
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                    continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"""Unused weights: {unused_weights}""")

    return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    '''simple docstring'''
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    '''simple docstring'''
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def create_vocab_dict(dict_path):
    '''simple docstring'''
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]

    num_words = len(words)

    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }

    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    encoder_config_path,
    decoder_config_path,
    vocab_size,
    num_decoder_layers,
):
    '''simple docstring'''
    encoder_config = WavaVecaConfig.from_pretrained(encoder_config_path)
    decoder_config = SpeechaTextaConfig.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )

    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1,
        sampling_rate=16_000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=True,
    )

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config)
    projection_layer = recursively_load_weights_wavaveca(model.encoder, hf_encoder)

    hf_decoder = SpeechaTextaForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)

    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())

    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"""The following keys are missing when loading the decoder weights: {missing_keys}""")
    logger.warning(f"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""")

    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wavavec.config.tie_word_embeddings = False

    # add projection layer
    hf_wavavec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wavavec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)

    vocab_dict = create_vocab_dict(dict_path)

    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)

    tokenizer = SpeechaTextaTokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wavavec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"

    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_: Optional[int] =argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument(
'--encoder_config_path',
default='facebook/wav2vec2-large-lv60',
type=str,
help='Path to hf encoder wav2vec2 checkpoint config',
)
parser.add_argument(
'--decoder_config_path',
default='facebook/s2t-small-mustc-en-fr-st',
type=str,
help='Path to hf decoder s2t checkpoint config',
)
parser.add_argument('--vocab_size', default=1_02_24, type=int, help='Vocab size of decoder')
parser.add_argument('--num_decoder_layers', default=7, type=int, help='Number of decoder layers')
SCREAMING_SNAKE_CASE_: Tuple =parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
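# Hedged usage sketch: a hypothetical command line for this conversion script
# (the file name and every path below are placeholders, not shipped artifacts):
#
#   python convert_wav2vec2_seq2seq_checkpoint.py \
#       --checkpoint_path /path/to/fairseq/checkpoint.pt \
#       --dict_path /path/to/dict.ltr.txt \
#       --pytorch_dump_folder_path ./wav2vec2-to-speech2text2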
| 78
|
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
UpperCamelCase = logging.get_logger(__name__)
class _lowerCamelCase ( SegformerImageProcessor ):
    """simple docstring"""
    def __init__( self , *args , **kwargs )->None:
        '''simple docstring'''
        warnings.warn(
            '''The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use SegformerImageProcessor instead.''' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 590
| 0
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class UpperCAmelCase_ ( a , a , unittest.TestCase):
lowerCamelCase__ = StableDiffusionSAGPipeline
lowerCamelCase__ = TEXT_TO_IMAGE_PARAMS
lowerCamelCase__ = TEXT_TO_IMAGE_BATCH_PARAMS
lowerCamelCase__ = TEXT_TO_IMAGE_IMAGE_PARAMS
lowerCamelCase__ = TEXT_TO_IMAGE_IMAGE_PARAMS
lowerCamelCase__ = False
def snake_case__ ( self):
'''simple docstring'''
torch.manual_seed(0)
_lowerCAmelCase : int = UNetaDConditionModel(
block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
_lowerCAmelCase : Any = DDIMScheduler(
beta_start=0.00_085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=__a, set_alpha_to_one=__a, )
torch.manual_seed(0)
_lowerCAmelCase : Optional[Any] = AutoencoderKL(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
torch.manual_seed(0)
_lowerCAmelCase : Any = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
_lowerCAmelCase : List[str] = CLIPTextModel(__a)
_lowerCAmelCase : Optional[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
_lowerCAmelCase : Optional[Any] = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def snake_case__ ( self, __a, __a=0):
'''simple docstring'''
if str(__a).startswith("mps"):
_lowerCAmelCase : Any = torch.manual_seed(__a)
else:
_lowerCAmelCase : int = torch.Generator(device=__a).manual_seed(__a)
_lowerCAmelCase : Tuple = {
"prompt": ".",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 1.0,
"sag_scale": 1.0,
"output_type": "numpy",
}
return inputs
def snake_case__ ( self):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3)
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase):
def snake_case__ ( self):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
_lowerCAmelCase : str = sag_pipe.to(__a)
sag_pipe.set_progress_bar_config(disable=__a)
_lowerCAmelCase : Any = "."
_lowerCAmelCase : Tuple = torch.manual_seed(0)
_lowerCAmelCase : List[Any] = sag_pipe(
[prompt], generator=__a, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np")
_lowerCAmelCase : Any = output.images
_lowerCAmelCase : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_lowerCAmelCase : List[str] = np.array([0.1_568, 0.1_738, 0.1_695, 0.1_693, 0.1_507, 0.1_705, 0.1_547, 0.1_751, 0.1_949])
assert np.abs(image_slice.flatten() - expected_slice).max() < 5E-2
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
_lowerCAmelCase : List[Any] = sag_pipe.to(__a)
sag_pipe.set_progress_bar_config(disable=__a)
_lowerCAmelCase : List[str] = "."
_lowerCAmelCase : List[str] = torch.manual_seed(0)
_lowerCAmelCase : Optional[int] = sag_pipe(
[prompt], generator=__a, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np")
_lowerCAmelCase : List[Any] = output.images
_lowerCAmelCase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_lowerCAmelCase : Any = np.array([0.3_459, 0.2_876, 0.2_537, 0.3_002, 0.2_671, 0.2_160, 0.3_026, 0.2_262, 0.2_371])
assert np.abs(image_slice.flatten() - expected_slice).max() < 5E-2
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
_lowerCAmelCase : int = sag_pipe.to(__a)
sag_pipe.set_progress_bar_config(disable=__a)
_lowerCAmelCase : Optional[Any] = "."
_lowerCAmelCase : Union[str, Any] = torch.manual_seed(0)
_lowerCAmelCase : List[str] = sag_pipe(
[prompt], width=768, height=512, generator=__a, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np", )
_lowerCAmelCase : Any = output.images
assert image.shape == (1, 512, 768, 3)
| 658
|
def price_plus_tax ( price , tax_rate ):
    '''simple docstring'''
    return price * (1 + tax_rate)
if __name__ == "__main__":
print(f'''{price_plus_tax(100, 0.25) = }''')
print(f'''{price_plus_tax(125.50, 0.05) = }''')
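# Expected output of the two prints above, worked by hand:
#   price_plus_tax(100, 0.25) = 125.0
#   price_plus_tax(125.50, 0.05) = 131.775   (float repr may differ in the last digits)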
| 658
| 1
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
A__ : List[Any] = logging.get_logger(__name__)
def UpperCAmelCase__ ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[Any]=False ) -> List[Any]:
__lowerCamelCase : str = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'blocks.{i}.norm1.weight', F'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'blocks.{i}.norm1.bias', F'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((F'blocks.{i}.attn.proj.weight', F'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((F'blocks.{i}.attn.proj.bias', F'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((F'blocks.{i}.norm2.weight', F'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'blocks.{i}.norm2.bias', F'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc1.weight', F'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc1.bias', F'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc2.weight', F'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc2.bias', F'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'vit.embeddings.cls_token'),
('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
__lowerCamelCase : Optional[int] = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def UpperCAmelCase__ ( UpperCAmelCase_ : Dict , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple=False ) -> Optional[int]:
for i in range(config.num_hidden_layers ):
if base_model:
__lowerCamelCase : str = ''
else:
__lowerCamelCase : str = 'vit.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
__lowerCamelCase : Optional[int] = state_dict.pop(F'blocks.{i}.attn.qkv.weight' )
__lowerCamelCase : Union[str, Any] = state_dict.pop(F'blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
__lowerCamelCase : str = in_proj_weight[
: config.hidden_size, :
]
__lowerCamelCase : str = in_proj_bias[: config.hidden_size]
__lowerCamelCase : str = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__lowerCamelCase : Union[str, Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
__lowerCamelCase : Optional[int] = in_proj_weight[
-config.hidden_size :, :
]
__lowerCamelCase : str = in_proj_bias[-config.hidden_size :]
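# Hedged illustration of the slicing above: for hidden_size=768 the fused
# in_proj_weight has shape (2304, 768); rows 0:768 become the query weights,
# rows 768:1536 the key weights, rows 1536:2304 the value weights, and the
# length-2304 bias vector is split the same way.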
def UpperCAmelCase__ ( UpperCAmelCase_ : List[str] ) -> Tuple:
__lowerCamelCase : Dict = ['head.weight', 'head.bias']
for k in ignore_keys:
state_dict.pop(__a , __a )
def UpperCAmelCase__ ( UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Any ) -> List[str]:
__lowerCamelCase : Dict = dct.pop(__a )
__lowerCamelCase : Optional[Any] = val
def UpperCAmelCase__ ( ) -> List[Any]:
__lowerCamelCase : List[str] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
__lowerCamelCase : List[str] = Image.open(requests.get(__a , stream=__a ).raw )
return im
@torch.no_grad()
def UpperCAmelCase__ ( UpperCAmelCase_ : str , UpperCAmelCase_ : Union[str, Any] ) -> str:
__lowerCamelCase : Tuple = ViTConfig()
__lowerCamelCase : Dict = False
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
if vit_name[-5:] == "in21k":
__lowerCamelCase : int = True
__lowerCamelCase : str = int(vit_name[-12:-10] )
__lowerCamelCase : int = int(vit_name[-9:-6] )
else:
__lowerCamelCase : Dict = 10_00
__lowerCamelCase : List[str] = 'huggingface/label-files'
__lowerCamelCase : List[Any] = 'imagenet-1k-id2label.json'
__lowerCamelCase : Optional[Any] = json.load(open(hf_hub_download(__a , __a , repo_type='dataset' ) , 'r' ) )
        __lowerCamelCase : Optional[Any] = {int(k ): v for k, v in idalabel.items()}
__lowerCamelCase : Union[str, Any] = idalabel
__lowerCamelCase : Optional[int] = {v: k for k, v in idalabel.items()}
__lowerCamelCase : List[str] = int(vit_name[-6:-4] )
__lowerCamelCase : List[Any] = int(vit_name[-3:] )
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith('tiny' ):
__lowerCamelCase : Any = 1_92
__lowerCamelCase : Tuple = 7_68
__lowerCamelCase : str = 12
__lowerCamelCase : Union[str, Any] = 3
elif vit_name[9:].startswith('small' ):
__lowerCamelCase : str = 3_84
__lowerCamelCase : Any = 15_36
__lowerCamelCase : str = 12
__lowerCamelCase : List[Any] = 6
else:
pass
else:
if vit_name[4:].startswith('small' ):
__lowerCamelCase : List[Any] = 7_68
__lowerCamelCase : Optional[int] = 23_04
__lowerCamelCase : Tuple = 8
__lowerCamelCase : Optional[int] = 8
elif vit_name[4:].startswith('base' ):
pass
elif vit_name[4:].startswith('large' ):
__lowerCamelCase : Union[str, Any] = 10_24
__lowerCamelCase : Optional[int] = 40_96
__lowerCamelCase : Dict = 24
__lowerCamelCase : Optional[Any] = 16
elif vit_name[4:].startswith('huge' ):
__lowerCamelCase : Dict = 12_80
__lowerCamelCase : List[str] = 51_20
__lowerCamelCase : List[str] = 32
__lowerCamelCase : List[str] = 16
# load original model from timm
__lowerCamelCase : Optional[int] = timm.create_model(__a , pretrained=__a )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
__lowerCamelCase : List[str] = timm_model.state_dict()
if base_model:
remove_classification_head_(__a )
__lowerCamelCase : Tuple = create_rename_keys(__a , __a )
for src, dest in rename_keys:
rename_key(__a , __a , __a )
read_in_q_k_v(__a , __a , __a )
# load HuggingFace model
if vit_name[-5:] == "in21k":
__lowerCamelCase : Optional[int] = ViTModel(__a ).eval()
else:
__lowerCamelCase : Optional[Any] = ViTForImageClassification(__a ).eval()
model.load_state_dict(__a )
# Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
if "deit" in vit_name:
__lowerCamelCase : int = DeiTImageProcessor(size=config.image_size )
else:
__lowerCamelCase : str = ViTImageProcessor(size=config.image_size )
__lowerCamelCase : Any = image_processor(images=prepare_img() , return_tensors='pt' )
__lowerCamelCase : List[Any] = encoding['pixel_values']
__lowerCamelCase : Union[str, Any] = model(__a )
if base_model:
__lowerCamelCase : Dict = timm_model.forward_features(__a )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(__a , outputs.pooler_output , atol=1e-3 )
else:
__lowerCamelCase : Optional[int] = timm_model(__a )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(__a , outputs.logits , atol=1e-3 )
Path(__a ).mkdir(exist_ok=__a )
print(F'Saving model {vit_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(__a )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(__a )
if __name__ == "__main__":
A__ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_patch16_224""",
type=str,
help="""Name of the ViT timm model you\'d like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
A__ : Tuple = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
| 13
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase_ : Any = logging.get_logger(__name__)
def A_ (__a ):
'''simple docstring'''
A_ = ASTConfig()
if "10-10" in model_name:
pass
elif "speech-commands" in model_name:
A_ = 128
elif "12-12" in model_name:
A_ = 12
A_ = 12
elif "14-14" in model_name:
A_ = 14
A_ = 14
elif "16-16" in model_name:
A_ = 16
A_ = 16
else:
raise ValueError("Model not supported" )
A_ = "huggingface/label-files"
if "speech-commands" in model_name:
A_ = 35
A_ = "speech-commands-v2-id2label.json"
else:
A_ = 527
A_ = "audioset-id2label.json"
A_ = json.load(open(hf_hub_download(__a , __a , repo_type="dataset" ) , "r" ) )
    A_ = {int(k ): v for k, v in idalabel.items()}
A_ = idalabel
A_ = {v: k for k, v in idalabel.items()}
return config
def A_ (__a ):
'''simple docstring'''
if "module.v" in name:
A_ = name.replace("module.v" , "audio_spectrogram_transformer" )
if "cls_token" in name:
A_ = name.replace("cls_token" , "embeddings.cls_token" )
if "dist_token" in name:
A_ = name.replace("dist_token" , "embeddings.distillation_token" )
if "pos_embed" in name:
A_ = name.replace("pos_embed" , "embeddings.position_embeddings" )
if "patch_embed.proj" in name:
A_ = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
# transformer blocks
if "blocks" in name:
A_ = name.replace("blocks" , "encoder.layer" )
if "attn.proj" in name:
A_ = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
A_ = name.replace("attn" , "attention.self" )
if "norm1" in name:
A_ = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
A_ = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
A_ = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
A_ = name.replace("mlp.fc2" , "output.dense" )
# final layernorm
if "audio_spectrogram_transformer.norm" in name:
A_ = name.replace("audio_spectrogram_transformer.norm" , "audio_spectrogram_transformer.layernorm" )
# classifier head
if "module.mlp_head.0" in name:
A_ = name.replace("module.mlp_head.0" , "classifier.layernorm" )
if "module.mlp_head.1" in name:
A_ = name.replace("module.mlp_head.1" , "classifier.dense" )
return name
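# Hedged worked example of the renaming chain above (key chosen for illustration):
#   "module.v.blocks.0.attn.proj.weight"
#   -> "audio_spectrogram_transformer.encoder.layer.0.attention.output.dense.weight"
# ("attn.proj" is rewritten before the bare "attn" check, so the projection is
# not double-renamed.)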
def A_ (__a , __a ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
A_ = orig_state_dict.pop(__a )
if "qkv" in key:
A_ = key.split("." )
A_ = int(key_split[3] )
A_ = config.hidden_size
if "weight" in key:
A_ = val[:dim, :]
A_ = val[dim : dim * 2, :]
A_ = val[-dim:, :]
else:
A_ = val[:dim]
A_ = val[dim : dim * 2]
A_ = val[-dim:]
else:
A_ = val
return orig_state_dict
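# Hedged note on the qkv split above: timm-style checkpoints store one fused
# "qkv" matrix of shape (3 * hidden_size, hidden_size); the val[:dim],
# val[dim : dim * 2] and val[-dim:] slices peel off the query, key and value
# thirds so they can live under separate HuggingFace-style keys.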
def A_ (__a ):
'''simple docstring'''
A_ = [
"module.v.head.weight",
"module.v.head.bias",
"module.v.head_dist.weight",
"module.v.head_dist.bias",
]
for k in ignore_keys:
state_dict.pop(__a , __a )
@torch.no_grad()
def A_ (__a , __a , __a=False ):
'''simple docstring'''
A_ = get_audio_spectrogram_transformer_config(__a )
A_ = {
"ast-finetuned-audioset-10-10-0.4593": (
"https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"
),
"ast-finetuned-audioset-10-10-0.450": (
"https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"
),
"ast-finetuned-audioset-10-10-0.448": (
"https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"
),
"ast-finetuned-audioset-10-10-0.448-v2": (
"https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"
),
"ast-finetuned-audioset-12-12-0.447": (
"https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"
),
"ast-finetuned-audioset-14-14-0.443": (
"https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"
),
"ast-finetuned-audioset-16-16-0.442": (
"https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"
),
"ast-finetuned-speech-commands-v2": (
"https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"
),
}
# load original state_dict
A_ = model_name_to_url[model_name]
A_ = torch.hub.load_state_dict_from_url(__a , map_location="cpu" )
# remove some keys
remove_keys(__a )
# rename some keys
A_ = convert_state_dict(__a , __a )
# load 🤗 model
A_ = ASTForAudioClassification(__a )
model.eval()
model.load_state_dict(__a )
# verify outputs on dummy input
# source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
A_ = -4.2677393 if "speech-commands" not in model_name else -6.845978
A_ = 4.5689974 if "speech-commands" not in model_name else 5.5654526
A_ = 1024 if "speech-commands" not in model_name else 128
A_ = ASTFeatureExtractor(mean=__a , std=__a , max_length=__a )
if "speech-commands" in model_name:
A_ = load_dataset("speech_commands" , "v0.02" , split="validation" )
A_ = dataset[0]["audio"]["array"]
else:
A_ = hf_hub_download(
repo_id="nielsr/audio-spectogram-transformer-checkpoint" , filename="sample_audio.flac" , repo_type="dataset" , )
A_ , A_ = torchaudio.load(__a )
A_ = waveform.squeeze().numpy()
A_ = feature_extractor(__a , sampling_rate=1_6000 , return_tensors="pt" )
# forward pass
A_ = model(**__a )
A_ = outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
A_ = torch.tensor([-0.8760, -7.0042, -8.6602] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
A_ = torch.tensor([-1.1986, -7.0903, -8.2718] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
A_ = torch.tensor([-2.6128, -8.0080, -9.4344] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
A_ = torch.tensor([-1.5080, -7.4534, -8.8917] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
A_ = torch.tensor([-0.5050, -6.5833, -8.0843] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
A_ = torch.tensor([-0.3826, -7.0336, -8.2413] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
A_ = torch.tensor([-1.2113, -6.9101, -8.3470] )
elif model_name == "ast-finetuned-speech-commands-v2":
A_ = torch.tensor([6.1589, -8.0566, -8.7984] )
else:
raise ValueError("Unknown model name" )
if not torch.allclose(logits[0, :3] , __a , atol=1e-4 ):
raise ValueError("Logits don't match" )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
Path(__a ).mkdir(exist_ok=__a )
print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(__a )
print(f'Saving feature extractor to {pytorch_dump_folder_path}' )
feature_extractor.save_pretrained(__a )
if push_to_hub:
print("Pushing model and feature extractor to the hub..." )
model.push_to_hub(f'MIT/{model_name}' )
feature_extractor.push_to_hub(f'MIT/{model_name}' )
if __name__ == "__main__":
UpperCamelCase_ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''ast-finetuned-audioset-10-10-0.4593''',
type=str,
help='''Name of the Audio Spectrogram Transformer model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
UpperCamelCase_ : List[str] = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 115
| 0
|
'''simple docstring'''
import numpy as np
from transformers import Pipeline
def softmax ( outputs ):
    '''simple docstring'''
    maxes = np.max(outputs , axis=-1 , keepdims=True )
    shifted_exp = np.exp(outputs - maxes )
    return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=True )
class _lowerCAmelCase( Pipeline):
    """simple docstring"""
    def _sanitize_parameters( self , **kwargs ):
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs['''second_text'''] = kwargs['''second_text''']
        return preprocess_kwargs, {}, {}
    def preprocess( self , text , second_text=None ):
        return self.tokenizer(text , text_pair=second_text , return_tensors=self.framework )
    def _forward( self , model_inputs ):
        return self.model(**model_inputs )
    def postprocess( self , model_outputs ):
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits )
        best_class = np.argmax(probabilities )
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
| 705
|
UNIVERSAL_GAS_CONSTANT = 8.31_44_62  # Unit - J mol-1 K-1
def pressure_of_gas_system ( moles: float , kelvin: float , volume: float ) -> float:
    '''simple docstring'''
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError('''Invalid inputs. Enter positive value.''' )
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def volume_of_gas_system ( moles: float , kelvin: float , pressure: float ) -> float:
    '''simple docstring'''
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError('''Invalid inputs. Enter positive value.''' )
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
from doctest import testmod
testmod()
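# Worked example (ideal gas law, P = nRT / V): 1 mol at 300 K in a 0.025 m^3
# vessel gives pressure_of_gas_system(1, 300, 0.025)
# = 1 * 300 * 8.314462 / 0.025 ~= 99_773.5 Pa.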
| 341
| 0
|
from __future__ import annotations
from random import choice
def random_pivot ( lst ):
    return choice(lst )
def kth_number ( lst : list[int] , k : int ):
    pivot = random_pivot(lst )
    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]
    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small ) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small ) < k - 1:
        return kth_number(big , k - len(small ) - 1 )
    # pivot is in elements smaller than k
    else:
        return kth_number(small , k )
if __name__ == "__main__":
import doctest
doctest.testmod()
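# Worked example (values kept distinct, since elements equal to the pivot are
# dropped by the two comprehensions above):
#   kth_number([3, 1, 4, 5, 9, 2], 3) -> 3   (sorted order: [1, 2, 3, 4, 5, 9])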
| 16
|
'''simple docstring'''
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class A :
def __init__( self , snake_case_ , snake_case_=1_3 , snake_case_=7 , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=False , snake_case_=False , snake_case_=False , snake_case_=2 , snake_case_=9_9 , snake_case_=0 , snake_case_=3_2 , snake_case_=5 , snake_case_=4 , snake_case_=0.1 , snake_case_=0.1 , snake_case_=5_1_2 , snake_case_=2 , snake_case_=0.02 , snake_case_=2 , snake_case_=4 , snake_case_="last" , snake_case_=True , snake_case_=None , snake_case_=0 , ) -> Any:
_a = parent
_a = batch_size
_a = seq_length
_a = is_training
_a = use_input_lengths
_a = use_token_type_ids
_a = use_labels
_a = gelu_activation
_a = sinusoidal_embeddings
_a = causal
_a = asm
_a = n_langs
_a = vocab_size
_a = n_special
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = max_position_embeddings
_a = type_sequence_label_size
_a = initializer_range
_a = num_labels
_a = num_choices
_a = summary_type
_a = use_proj
_a = scope
_a = bos_token_id
def __lowerCAmelCase ( self ) -> Tuple:
_a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_a = random_attention_mask([self.batch_size, self.seq_length] )
_a = None
if self.use_input_lengths:
_a = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
_a = None
if self.use_token_type_ids:
_a = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
_a = None
_a = None
_a = None
if self.use_labels:
_a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_a = ids_tensor([self.batch_size] , 2 ).float()
_a = ids_tensor([self.batch_size] , self.num_choices )
_a = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def __lowerCAmelCase ( self ) -> str:
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ) -> Optional[int]:
_a = XLMModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_a = model(snake_case_ , lengths=snake_case_ , langs=snake_case_ )
_a = model(snake_case_ , langs=snake_case_ )
_a = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ) -> Union[str, Any]:
_a = XLMWithLMHeadModel(snake_case_ )
model.to(snake_case_ )
model.eval()
_a = model(snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ) -> str:
_a = XLMForQuestionAnsweringSimple(snake_case_ )
model.to(snake_case_ )
model.eval()
_a = model(snake_case_ )
_a = model(snake_case_ , start_positions=snake_case_ , end_positions=snake_case_ )
_a = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ) -> Optional[int]:
_a = XLMForQuestionAnswering(snake_case_ )
model.to(snake_case_ )
model.eval()
_a = model(snake_case_ )
_a = model(
snake_case_ , start_positions=snake_case_ , end_positions=snake_case_ , cls_index=snake_case_ , is_impossible=snake_case_ , p_mask=snake_case_ , )
_a = model(
snake_case_ , start_positions=snake_case_ , end_positions=snake_case_ , cls_index=snake_case_ , is_impossible=snake_case_ , )
((_a) , ) = result_with_labels.to_tuple()
_a = model(snake_case_ , start_positions=snake_case_ , end_positions=snake_case_ )
((_a) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ) -> Tuple:
_a = XLMForSequenceClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
_a = model(snake_case_ )
_a = model(snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ) -> Union[str, Any]:
_a = self.num_labels
_a = XLMForTokenClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
_a = model(snake_case_ , attention_mask=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ) -> str:
_a = self.num_choices
_a = XLMForMultipleChoice(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_a = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_a = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_a = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_a = model(
snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowerCAmelCase ( self ) -> Optional[int]:
_a = self.prepare_config_and_inputs()
(
(
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) ,
) = config_and_inputs
_a = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
return config, inputs_dict
@require_torch
class A ( a , a , a , unittest.TestCase ):
__UpperCAmelCase : str = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
__UpperCAmelCase : int = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
__UpperCAmelCase : List[Any] = (
{
"""feature-extraction""": XLMModel,
"""fill-mask""": XLMWithLMHeadModel,
"""question-answering""": XLMForQuestionAnsweringSimple,
"""text-classification""": XLMForSequenceClassification,
"""text-generation""": XLMWithLMHeadModel,
"""token-classification""": XLMForTokenClassification,
"""zero-shot""": XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Tuple:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_=False ) -> List[Any]:
_a = super()._prepare_for_class(snake_case_ , snake_case_ , return_labels=snake_case_ )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
_a = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=snake_case_ )
_a = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=snake_case_ )
return inputs_dict
def __lowerCAmelCase ( self ) -> Dict:
_a = XLMModelTester(self )
_a = ConfigTester(self , config_class=snake_case_ , emb_dim=3_7 )
def __lowerCAmelCase ( self ) -> Dict:
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self ) -> Optional[Any]:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*snake_case_ )
def __lowerCAmelCase ( self ) -> List[str]:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*snake_case_ )
def __lowerCAmelCase ( self ) -> Dict:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*snake_case_ )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*snake_case_ )
def __lowerCAmelCase ( self ) -> List[str]:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*snake_case_ )
def __lowerCAmelCase ( self ) -> List[Any]:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*snake_case_ )
def __lowerCAmelCase ( self ) -> List[str]:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*snake_case_ )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_=False , snake_case_=1 ) -> Dict:
self.assertIsInstance(snake_case_ , snake_case_ )
self.assertListEqual(
[isinstance(snake_case_ , snake_case_ ) for iter_attentions in attentions] , [True] * len(snake_case_ ) )
self.assertEqual(len(snake_case_ ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(snake_case_ ):
# adds PAD dummy token
_a = min_length + idx + 1
_a = min_length + idx + 1
_a = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(snake_case_ ) )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_=False , snake_case_=1 ) -> Dict:
self.assertIsInstance(snake_case_ , snake_case_ )
self.assertListEqual(
[isinstance(snake_case_ , snake_case_ ) for iter_hidden_states in hidden_states] , [True] * len(snake_case_ ) , )
self.assertEqual(len(snake_case_ ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(snake_case_ ):
# adds PAD dummy token
_a = min_length + idx + 1
_a = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(snake_case_ ) , )
pass
@slow
def __lowerCAmelCase ( self ) -> Optional[Any]:
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a = XLMModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
@require_torch
class A ( unittest.TestCase ):
@slow
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048" )
model.to(snake_case_ )
_a = torch.tensor([[1_4, 4_4_7]] , dtype=torch.long , device=snake_case_ ) # the president
_a = [
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
_a = model.generate(snake_case_ , do_sample=snake_case_ )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , snake_case_ )
| 131
| 0
|
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class __a ( unittest.TestCase ):
SCREAMING_SNAKE_CASE = JukeboxTokenizer
SCREAMING_SNAKE_CASE = {
"artist": "Zac Brown Band",
"genres": "Country",
"lyrics": "I met a traveller from an antique land,\n Who said \"Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ",
}
@require_torch
def UpperCamelCase ( self : int)-> Optional[Any]:
import torch
__lowerCAmelCase =JukeboxTokenizer.from_pretrained("""openai/jukebox-1b-lyrics""")
__lowerCAmelCase =tokenizer(**self.metas)["""input_ids"""]
# fmt: off
__lowerCAmelCase =[
torch.tensor([[
0, 0, 0, 71_69, 5_07, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]]),
torch.tensor([[0, 0, 0, 10_69, 11]]),
torch.tensor([[0, 0, 0, 10_69, 11]]),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0]))
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1]))
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2]))
@require_torch
def UpperCamelCase ( self : Optional[Any])-> Tuple:
import torch
__lowerCAmelCase =JukeboxTokenizer.from_pretrained("""openai/jukebox-5b-lyrics""")
__lowerCAmelCase =tokenizer(**self.metas)["""input_ids"""]
# fmt: off
__lowerCAmelCase =[
torch.tensor([[
0, 0, 0, 10_69, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]]),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]]),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]]),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0]))
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1]))
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2]))
| 456
|
import math
from numpy import inf
from scipy.integrate import quad
def __lowerCAmelCase ( num : float ) -> float:
    if num <= 0:
        raise ValueError("""math domain error""" )
    # integrate x^(num - 1) * e^(-x) from 0 to infinity
    return quad(integrand , 0 , inf , args=(num,) )[0]
def integrand ( x : float , z : float ) -> float:
    return math.pow(x , z - 1 ) * math.exp(-x )
if __name__ == "__main__":
from doctest import testmod
testmod()
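# Worked example: for positive integers the integral reduces to a factorial,
# so the gamma-style function above satisfies __lowerCAmelCase(5.0) ~= 24.0
# (= 4!) up to quadrature error.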
| 456
| 1
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '''▁'''
VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.bpe.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model''',
'''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model''',
'''xlm-roberta-large-finetuned-conll02-dutch''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model'''
),
'''xlm-roberta-large-finetuned-conll02-spanish''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model'''
),
'''xlm-roberta-large-finetuned-conll03-english''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model'''
),
'''xlm-roberta-large-finetuned-conll03-german''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model'''
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''xlm-roberta-base''': 512,
'''xlm-roberta-large''': 512,
'''xlm-roberta-large-finetuned-conll02-dutch''': 512,
'''xlm-roberta-large-finetuned-conll02-spanish''': 512,
'''xlm-roberta-large-finetuned-conll03-english''': 512,
'''xlm-roberta-large-finetuned-conll03-german''': 512,
}
class SCREAMING_SNAKE_CASE__ ( PreTrainedTokenizer ):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self , snake_case__ , snake_case__="<s>" , snake_case__="</s>" , snake_case__="</s>" , snake_case__="<s>" , snake_case__="<unk>" , snake_case__="<pad>" , snake_case__="<mask>" , snake_case__ = None , **snake_case__ , ):
"""simple docstring"""
lowerCAmelCase : Any = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else mask_token
lowerCAmelCase : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=snake_case__ , eos_token=snake_case__ , unk_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , sp_model_kwargs=self.sp_model_kwargs , **snake_case__ , )
lowerCAmelCase : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(snake_case__ ) )
lowerCAmelCase : Union[str, Any] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
lowerCAmelCase : Union[str, Any] = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
lowerCAmelCase : Union[str, Any] = 1
lowerCAmelCase : int = len(self.sp_model ) + self.fairseq_offset
lowerCAmelCase : Optional[int] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
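        # Hedged example of the offset at work: per the alignment table above,
        # SentencePiece gives "," id 3, so _convert_token_to_id returns
        # 3 + self.fairseq_offset == 4, matching the fairseq column.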
    def __getstate__( self ):
        """simple docstring"""
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state
    def __setstate__( self , d ):
        """simple docstring"""
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def lowercase__ ( self , snake_case__ , snake_case__ = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCAmelCase : Tuple = [self.cls_token_id]
lowerCAmelCase : Union[str, Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowercase__ ( self , snake_case__ , snake_case__ = None , snake_case__ = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ )
if token_ids_a is None:
return [1] + ([0] * len(snake_case__ )) + [1]
return [1] + ([0] * len(snake_case__ )) + [1, 1] + ([0] * len(snake_case__ )) + [1]
def lowercase__ ( self , snake_case__ , snake_case__ = None ):
"""simple docstring"""
lowerCAmelCase : Tuple = [self.sep_token_id]
lowerCAmelCase : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowercase__ ( self ):
"""simple docstring"""
return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : str = {self.convert_ids_to_tokens(snake_case__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
return self.sp_model.encode(snake_case__ , out_type=snake_case__ )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowerCAmelCase : Tuple = self.sp_model.PieceToId(snake_case__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : List[Any] = "".join(snake_case__ ).replace(snake_case__ , " " ).strip()
return out_string
def lowercase__ ( self , snake_case__ , snake_case__ = None ):
"""simple docstring"""
if not os.path.isdir(snake_case__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCAmelCase : str = os.path.join(
snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case__ )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case__ , "wb" ) as fi:
lowerCAmelCase : Dict = self.sp_model.serialized_model_proto()
fi.write(snake_case__ )
return (out_vocab_file,)
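# --- added usage sketch (not part of the original file) ---
# The methods above follow the XLM-R-style SentencePiece tokenizer: piece id `p`
# from the SP model maps to vocabulary id `p + self.fairseq_offset`, with <mask>
# appended at the very end. A minimal round-trip, assuming `tok` is an instance
# of the class above:
#
#     token = tok._tokenize("Hello")[0]
#     idx = tok._convert_token_to_id(token)
#     assert tok._convert_id_to_token(idx) == token
#     assert tok.vocab_size == len(tok.sp_model) + tok.fairseq_offset + 1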
| 645
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
lowerCAmelCase__ = logging.get_logger(__name__)
class ChineseCLIPFeatureExtractor(ChineseCLIPImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ChineseCLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 645
| 1
|
'''simple docstring'''
def logical_left_shift(number: int, shift_amount: int) -> str:
    # Logical left shift: append `shift_amount` zeros (number << shift_amount).
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))
    binary_number += "0" * shift_amount
    return binary_number


def logical_right_shift(number: int, shift_amount: int) -> str:
    # Logical right shift: drop the lowest `shift_amount` bits, filling with 0s.
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))[2:]
    if shift_amount >= len(binary_number):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number) - shift_amount]
    return "0b" + shifted_binary_number


def arithmetic_right_shift(number: int, shift_amount: int) -> str:
    # Arithmetic right shift: like >>, but the sign bit is replicated on the left.
    if number >= 0:  # Get binary representation of positive number
        binary_number = "0" + str(bin(number)).strip("-")[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number)[3:])  # Find 2's complement of number
        binary_number = bin(abs(number) - (1 << binary_number_length))[3:]
        binary_number = (
            "1" + "0" * (binary_number_length - len(binary_number)) + binary_number
        )

    if shift_amount >= len(binary_number):
        return "0b" + binary_number[0] * len(binary_number)
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number) - shift_amount]
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
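# --- added usage sketch (not part of the original module) ---
def _shift_demo() -> None:
    # Exercises the three shift functions above on small, hand-checked values.
    assert logical_left_shift(0b1101, 2) == "0b110100"  # 13 << 2 == 52
    assert logical_right_shift(0b1101, 2) == "0b11"  # 13 >> 2 == 3
    assert arithmetic_right_shift(-8, 2) == "0b11110"  # -8 >> 2 == -2 (5-bit two's complement)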
| 713
|
'''simple docstring'''
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    """
    Holds the mean and standard deviation of the image embedder, and exposes
    scale/unscale helpers for normalizing embeddings.
    """

    @register_to_config
    def __init__(self, embedding_dim: int = 768) -> None:
        super().__init__()

        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(
        self,
        torch_device: Optional[Union[str, torch.device]] = None,
        torch_dtype: Optional[torch.dtype] = None,
    ):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        embeds = (embeds * self.std) + self.mean
        return embeds
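# --- added round-trip sketch (not part of the original file) ---
def _normalizer_round_trip_demo() -> None:
    # scale() and unscale() are exact inverses whenever std is non-zero.
    normalizer = StableUnCLIPImageNormalizer(embedding_dim=768)
    x = torch.randn(4, 768)
    assert torch.allclose(normalizer.unscale(normalizer.scale(x)), x, atol=1e-5)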
| 384
| 0
|
'''simple docstring'''
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
TINY_BART = "sshleifer/bart-tiny-random"
TINY_T5 = "patrickvonplaten/t5-tiny-random"


@require_torch
class MakeStudentTester(unittest.TestCase):
    @cached_property
    def teacher_config(self):
        return AutoConfig.from_pretrained(TINY_BART)

    def test_valid_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.num_hidden_layers, 1)

    def test_asymmetric_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None)

    def test_same_decoder_small_encoder(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers)

    def test_small_enc_small_dec(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, 1)

    def test_raises_assert(self):
        with self.assertRaises(AssertionError):
            create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None)
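# Added note (sketch, inferred from the tests above): `e` and `d` are the student
# encoder/decoder depths; passing d=None keeps the teacher's decoder depth, which
# is why test_same_decoder_small_encoder expects decoder_layers to equal the
# teacher's encoder_layers (the two coincide for the tiny BART teacher).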
| 374
|
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self : Any , snake_case_ : List[Any] , snake_case_ : Union[str, Any]=13 , snake_case_ : Optional[int]=3 , snake_case_ : List[Any]=True , snake_case_ : Dict=True , snake_case_ : Optional[Any]=0.1 , snake_case_ : Tuple=0.1 , snake_case_ : Dict=224 , snake_case_ : Dict=1_000 , snake_case_ : Tuple=[3, 3, 6, 4] , snake_case_ : Tuple=[48, 56, 112, 220] , ):
snake_case__ : str = parent
snake_case__ : Dict = batch_size
snake_case__ : Dict = num_channels
snake_case__ : Any = is_training
snake_case__ : Union[str, Any] = use_labels
snake_case__ : Tuple = hidden_dropout_prob
snake_case__ : Optional[int] = attention_probs_dropout_prob
snake_case__ : List[str] = num_labels
snake_case__ : Any = image_size
snake_case__ : str = layer_depths
snake_case__ : Union[str, Any] = embed_dims
def lowerCamelCase ( self : Tuple ):
snake_case__ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case__ : int = None
if self.use_labels:
snake_case__ : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
snake_case__ : List[str] = self.get_config()
return config, pixel_values, labels
def lowerCamelCase ( self : str ):
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="""gelu""" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=snake_case_ , layer_scale_init_value=1E-5 , )
def lowerCamelCase ( self : Optional[int] , snake_case_ : Any , snake_case_ : Union[str, Any] , snake_case_ : List[str] ):
snake_case__ : Dict = SwiftFormerModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
snake_case__ : List[str] = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def lowerCamelCase ( self : List[Any] , snake_case_ : Tuple , snake_case_ : Optional[int] , snake_case_ : Optional[int] ):
snake_case__ : Optional[int] = self.num_labels
snake_case__ : Union[str, Any] = SwiftFormerForImageClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
snake_case__ : Optional[Any] = model(snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
snake_case__ : Union[str, Any] = SwiftFormerForImageClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
snake_case__ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case__ : Any = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase ( self : List[Any] ):
        (config, pixel_values, labels) = self.prepare_config_and_inputs()
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _a , _a , unittest.TestCase ):
"""simple docstring"""
lowercase = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
lowercase = (
{"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
lowercase = False
lowercase = False
lowercase = False
lowercase = False
lowercase = False
def lowerCamelCase ( self : Tuple ):
snake_case__ : Optional[Any] = SwiftFormerModelTester(self )
snake_case__ : Union[str, Any] = ConfigTester(
self , config_class=snake_case_ , has_text_modality=snake_case_ , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def lowerCamelCase ( self : Optional[int] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""SwiftFormer does not use inputs_embeds""" )
def lowerCamelCase ( self : Optional[Any] ):
pass
def lowerCamelCase ( self : Union[str, Any] ):
snake_case__ , snake_case__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : Optional[int] = model_class(snake_case_ )
snake_case__ : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case_ , nn.Linear ) )
def lowerCamelCase ( self : List[str] ):
snake_case__ , snake_case__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : Optional[int] = model_class(snake_case_ )
snake_case__ : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ : List[str] = [*signature.parameters.keys()]
snake_case__ : Optional[int] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , snake_case_ )
def lowerCamelCase ( self : List[Any] ):
snake_case__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def lowerCamelCase ( self : Tuple ):
snake_case__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case_ )
@slow
def lowerCamelCase ( self : List[str] ):
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ : Union[str, Any] = SwiftFormerModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
@unittest.skip(reason="""SwiftFormer does not output attentions""" )
def lowerCamelCase ( self : Union[str, Any] ):
pass
def lowerCamelCase ( self : List[Any] ):
def check_hidden_states_output(snake_case_ : str , snake_case_ : Tuple , snake_case_ : List[Any] ):
snake_case__ : Optional[Any] = model_class(snake_case_ )
model.to(snake_case_ )
model.eval()
with torch.no_grad():
snake_case__ : Tuple = model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
snake_case__ : Optional[Any] = outputs.hidden_states
snake_case__ : List[Any] = 8
self.assertEqual(len(snake_case_ ) , snake_case_ ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(snake_case_ ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
snake_case__ , snake_case__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : Any = True
check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case__ : str = True
check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ )
def lowerCamelCase ( self : Optional[Any] ):
def _config_zero_init(snake_case_ : Optional[Any] ):
snake_case__ : Union[str, Any] = copy.deepcopy(snake_case_ )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(snake_case_ , snake_case_ , 1E-1_0 )
if isinstance(getattr(snake_case_ , snake_case_ , snake_case_ ) , snake_case_ ):
snake_case__ : List[str] = _config_zero_init(getattr(snake_case_ , snake_case_ ) )
setattr(snake_case_ , snake_case_ , snake_case_ )
return configs_no_init
snake_case__ , snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : List[str] = _config_zero_init(snake_case_ )
for model_class in self.all_model_classes:
snake_case__ : Optional[int] = model_class(config=snake_case_ )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9) / 1E9).round().item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCamelCase ( self : Tuple ):
pass
def __snake_case( ) -> Optional[int]:
snake_case__ : Optional[int] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowerCamelCase ( self : List[str] ):
return ViTImageProcessor.from_pretrained("""MBZUAI/swiftformer-xs""" ) if is_vision_available() else None
@slow
def lowerCamelCase ( self : List[Any] ):
snake_case__ : str = SwiftFormerForImageClassification.from_pretrained("""MBZUAI/swiftformer-xs""" ).to(snake_case_ )
snake_case__ : Dict = self.default_image_processor
snake_case__ : Optional[Any] = prepare_img()
snake_case__ : Union[str, Any] = image_processor(images=snake_case_ , return_tensors="""pt""" ).to(snake_case_ )
# forward pass
with torch.no_grad():
snake_case__ : Dict = model(**snake_case_ )
# verify the logits
snake_case__ : int = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , snake_case_ )
snake_case__ : int = torch.tensor([[-2.1_7_0_3E0_0, 2.1_1_0_7E0_0, -2.0_8_1_1E0_0]] ).to(snake_case_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case_ , atol=1E-4 ) )
| 374
| 1
|
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
lowercase = logging.getLogger(__name__)
@dataclass
class UpperCAmelCase :
'''simple docstring'''
__magic_name__ : str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
__magic_name__ : Optional[str] = field(
default=snake_case__ , metadata={"help": "Pretrained config name or path if not the same as model_name"})
__magic_name__ : Optional[str] = field(
default=snake_case__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
__magic_name__ : Optional[str] = field(
default=snake_case__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
__magic_name__ : bool = field(default=snake_case__ , metadata={"help": "Whether tp freeze the encoder."})
__magic_name__ : bool = field(default=snake_case__ , metadata={"help": "Whether to freeze the embeddings."})
@dataclass
class UpperCAmelCase :
'''simple docstring'''
__magic_name__ : str = field(
metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."})
__magic_name__ : Optional[str] = field(
default="summarization" , metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"} , )
__magic_name__ : Optional[int] = field(
default=1_024 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
__magic_name__ : Optional[int] = field(
default=128 , metadata={
"help": (
"The maximum total sequence length for target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
__magic_name__ : Optional[int] = field(
default=142 , metadata={
"help": (
"The maximum total sequence length for validation target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded. "
"This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
"during ``evaluate`` and ``predict``."
)
} , )
__magic_name__ : Optional[int] = field(
default=142 , metadata={
"help": (
"The maximum total sequence length for test target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
__magic_name__ : Optional[int] = field(default=-1 , metadata={"help": "# training examples. -1 means use all."})
__magic_name__ : Optional[int] = field(default=-1 , metadata={"help": "# validation examples. -1 means use all."})
__magic_name__ : Optional[int] = field(default=-1 , metadata={"help": "# test examples. -1 means use all."})
__magic_name__ : Optional[str] = field(default=snake_case__ , metadata={"help": "Source language id for translation."})
__magic_name__ : Optional[str] = field(default=snake_case__ , metadata={"help": "Target language id for translation."})
__magic_name__ : Optional[int] = field(default=snake_case__ , metadata={"help": "# num_beams to use for evaluation."})
__magic_name__ : bool = field(
default=snake_case__ , metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."} , )
def handle_metrics(split, metrics, output_dir):
    """Log and save metrics. `split` is one of train/val/test."""
    logger.info(F"""***** {split} metrics *****""" )
    for key in sorted(metrics.keys() ):
        logger.info(F"""  {key} = {metrics[key]}""" )
    save_json(metrics , os.path.join(output_dir , F"""{split}_results.json""" ) )
def main():
'''simple docstring'''
a_ =HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
a_ , a_ , a_ =parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
a_ , a_ , a_ =parser.parse_args_into_dataclasses()
check_output_dir(_SCREAMING_SNAKE_CASE )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("Training/evaluation parameters %s" , _SCREAMING_SNAKE_CASE )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
a_ =AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
a_ =("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
for p in extra_model_params:
if getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
assert hasattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ), F"""({config.__class__.__name__}) doesn't have a `{p}` attribute"""
setattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
a_ =AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
a_ =AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf=".ckpt" in model_args.model_name_or_path , config=_SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(_SCREAMING_SNAKE_CASE , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
a_ =model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(_SCREAMING_SNAKE_CASE , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
a_ =tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
a_ =tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(_SCREAMING_SNAKE_CASE )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
a_ =SeqaSeqDataset
# Get datasets
a_ =(
dataset_class(
_SCREAMING_SNAKE_CASE , type_path="train" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_train
else None
)
a_ =(
dataset_class(
_SCREAMING_SNAKE_CASE , type_path="val" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
a_ =(
dataset_class(
_SCREAMING_SNAKE_CASE , type_path="test" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_predict
else None
)
# Initialize our Trainer
a_ =(
build_compute_metrics_fn(data_args.task , _SCREAMING_SNAKE_CASE ) if training_args.predict_with_generate else None
)
a_ =SeqaSeqTrainer(
model=_SCREAMING_SNAKE_CASE , args=_SCREAMING_SNAKE_CASE , data_args=_SCREAMING_SNAKE_CASE , train_dataset=_SCREAMING_SNAKE_CASE , eval_dataset=_SCREAMING_SNAKE_CASE , data_collator=SeqaSeqDataCollator(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , )
a_ ={}
# Training
if training_args.do_train:
logger.info("*** Train ***" )
a_ =trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
a_ =train_result.metrics
a_ =data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics("train" , _SCREAMING_SNAKE_CASE , training_args.output_dir )
all_metrics.update(_SCREAMING_SNAKE_CASE )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , "trainer_state.json" ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
a_ =trainer.evaluate(metric_key_prefix="val" )
a_ =data_args.n_val
a_ =round(metrics["val_loss"] , 4 )
if trainer.is_world_process_zero():
handle_metrics("val" , _SCREAMING_SNAKE_CASE , training_args.output_dir )
all_metrics.update(_SCREAMING_SNAKE_CASE )
if training_args.do_predict:
logger.info("*** Predict ***" )
a_ =trainer.predict(test_dataset=_SCREAMING_SNAKE_CASE , metric_key_prefix="test" )
a_ =test_output.metrics
a_ =data_args.n_test
if trainer.is_world_process_zero():
a_ =round(metrics["test_loss"] , 4 )
handle_metrics("test" , _SCREAMING_SNAKE_CASE , training_args.output_dir )
all_metrics.update(_SCREAMING_SNAKE_CASE )
if training_args.predict_with_generate:
a_ =tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=_SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=_SCREAMING_SNAKE_CASE )
a_ =lmap(str.strip , _SCREAMING_SNAKE_CASE )
write_txt_file(_SCREAMING_SNAKE_CASE , os.path.join(training_args.output_dir , "test_generations.txt" ) )
if trainer.is_world_process_zero():
save_json(_SCREAMING_SNAKE_CASE , os.path.join(training_args.output_dir , "all_results.json" ) )
return all_metrics
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
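# Example invocation (illustrative only; flag names come from the argument
# dataclasses above plus the standard HF Seq2SeqTrainingArguments):
#
#   python finetune_trainer.py \
#     --model_name_or_path t5-small \
#     --data_dir ./data/xsum \
#     --output_dir ./output \
#     --do_train --do_eval \
#     --predict_with_generate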
| 702
|
'''simple docstring'''
import torch
from diffusers import StableDiffusionPipeline
model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

prompt = "A photo of sks dog in a bucket"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save('''dog-bucket.png''')
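# Added sketch: for reproducible samples, seed the diffusion process with a
# generator (reuses the `pipe` and `prompt` defined above):
#
#   generator = torch.Generator("cuda").manual_seed(0)
#   image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5, generator=generator).images[0]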
| 41
| 0
|
'''simple docstring'''
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config(config_path, display=False):
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config


def load_vqgan(device, conf_path=None, ckpt_path=None):
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd
    return model


def reconstruct_with_vqgan(x, model):
    z, _, _ = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec


def get_obj_from_str(string, reload=False):
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)


def instantiate_from_config(config):
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))


def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}


def load_model(config, ckpt, gpu, eval_mode):
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
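# --- added usage sketch (paths are the same defaults assumed above) ---
#
#   config = load_config("./model_checkpoints/vqgan_only.yaml", display=True)
#   vqgan = load_vqgan("cuda", ckpt_path="./model_checkpoints/vqgan_only.pt")
#   xrec = reconstruct_with_vqgan(x, vqgan)  # x: (B, 3, H, W) image batch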
| 414
|
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class a_ :
def __init__( self : Any , __UpperCamelCase : List[Any] , __UpperCamelCase : Dict=13 , __UpperCamelCase : Tuple=7 , __UpperCamelCase : List[Any]=True , __UpperCamelCase : List[str]=True , __UpperCamelCase : Union[str, Any]=False , __UpperCamelCase : List[str]=True , __UpperCamelCase : str=99 , __UpperCamelCase : Optional[Any]=32 , __UpperCamelCase : Union[str, Any]=5 , __UpperCamelCase : Tuple=4 , __UpperCamelCase : Any=37 , __UpperCamelCase : str="gelu" , __UpperCamelCase : Dict=0.1 , __UpperCamelCase : int=0.1 , __UpperCamelCase : List[str]=5_12 , __UpperCamelCase : Union[str, Any]=16 , __UpperCamelCase : Union[str, Any]=2 , __UpperCamelCase : Union[str, Any]=0.0_2 , __UpperCamelCase : List[str]=3 , __UpperCamelCase : Optional[Any]=4 , __UpperCamelCase : str=None , ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = scope
def _snake_case ( self : Optional[int] ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = None
if self.use_input_mask:
_UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _snake_case ( self : str ) ->Any:
'''simple docstring'''
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , )
def _snake_case ( self : List[str] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Any , __UpperCamelCase : List[str] , __UpperCamelCase : Tuple , __UpperCamelCase : Dict , __UpperCamelCase : Any , __UpperCamelCase : Union[str, Any] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = LlamaModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase )
_UpperCAmelCase = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self : List[Any] , __UpperCamelCase : Tuple , __UpperCamelCase : str , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Tuple , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Tuple , __UpperCamelCase : Union[str, Any] , ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase = True
_UpperCAmelCase = LlamaModel(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , encoder_attention_mask=__UpperCamelCase , )
_UpperCAmelCase = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , )
_UpperCAmelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self : List[str] , __UpperCamelCase : List[str] , __UpperCamelCase : int , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[Any] , __UpperCamelCase : List[str] , __UpperCamelCase : Any , __UpperCamelCase : Tuple , __UpperCamelCase : Tuple , __UpperCamelCase : int , ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = LlamaForCausalLM(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _snake_case ( self : int , __UpperCamelCase : str , __UpperCamelCase : Any , __UpperCamelCase : List[str] , __UpperCamelCase : int , __UpperCamelCase : Optional[int] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Any , __UpperCamelCase : int , __UpperCamelCase : Dict , ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = LlamaForCausalLM(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
# first forward pass
_UpperCAmelCase = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , encoder_attention_mask=__UpperCamelCase , use_cache=__UpperCamelCase , )
_UpperCAmelCase = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
_UpperCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
_UpperCAmelCase = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
_UpperCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
_UpperCAmelCase = torch.cat([input_mask, next_mask] , dim=-1 )
_UpperCAmelCase = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , encoder_attention_mask=__UpperCamelCase , output_hidden_states=__UpperCamelCase , )["""hidden_states"""][0]
_UpperCAmelCase = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , encoder_attention_mask=__UpperCamelCase , past_key_values=__UpperCamelCase , output_hidden_states=__UpperCamelCase , )["""hidden_states"""][0]
# select random slice
_UpperCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_UpperCAmelCase = output_from_no_past[:, -3:, random_slice_idx].detach()
_UpperCAmelCase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) )
def _snake_case ( self : Tuple ) ->Any:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class a_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
a : Optional[Any] = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
a : Any = (LlamaForCausalLM,) if is_torch_available() else ()
a : Union[str, Any] = (
{
'feature-extraction': LlamaModel,
'text-classification': LlamaForSequenceClassification,
'text-generation': LlamaForCausalLM,
'zero-shot': LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
a : str = False
a : List[str] = False
def _snake_case ( self : Union[str, Any] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase = LlamaModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=__UpperCamelCase , hidden_size=37 )
def _snake_case ( self : Optional[int] ) ->Optional[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def _snake_case ( self : Dict ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def _snake_case ( self : int ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_UpperCAmelCase = type
self.model_tester.create_and_check_model(*__UpperCamelCase )
def _snake_case ( self : Tuple ) ->int:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = 3
_UpperCAmelCase = input_dict["""input_ids"""]
_UpperCAmelCase = input_ids.ne(1 ).to(__UpperCamelCase )
_UpperCAmelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
_UpperCAmelCase = LlamaForSequenceClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase , labels=__UpperCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _snake_case ( self : Dict ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = 3
_UpperCAmelCase = """single_label_classification"""
_UpperCAmelCase = input_dict["""input_ids"""]
_UpperCAmelCase = input_ids.ne(1 ).to(__UpperCamelCase )
_UpperCAmelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
_UpperCAmelCase = LlamaForSequenceClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase , labels=__UpperCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _snake_case ( self : List[str] ) ->str:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = 3
_UpperCAmelCase = """multi_label_classification"""
_UpperCAmelCase = input_dict["""input_ids"""]
_UpperCAmelCase = input_ids.ne(1 ).to(__UpperCamelCase )
_UpperCAmelCase = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
_UpperCAmelCase = LlamaForSequenceClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase , labels=__UpperCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("""LLaMA buffers include complex numbers, which breaks this test""" )
def _snake_case ( self : Dict ) ->int:
'''simple docstring'''
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def _snake_case ( self : int , __UpperCamelCase : Tuple ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = ids_tensor([1, 10] , config.vocab_size )
_UpperCAmelCase = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
_UpperCAmelCase = LlamaModel(__UpperCamelCase )
original_model.to(__UpperCamelCase )
original_model.eval()
_UpperCAmelCase = original_model(__UpperCamelCase ).last_hidden_state
_UpperCAmelCase = original_model(__UpperCamelCase ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
_UpperCAmelCase = {"""type""": scaling_type, """factor""": 1_0.0}
_UpperCAmelCase = LlamaModel(__UpperCamelCase )
scaled_model.to(__UpperCamelCase )
scaled_model.eval()
_UpperCAmelCase = scaled_model(__UpperCamelCase ).last_hidden_state
_UpperCAmelCase = scaled_model(__UpperCamelCase ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-5 ) )
@require_torch
class a_ ( unittest.TestCase ):
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
def _snake_case ( self : str ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
_UpperCAmelCase = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-7b-hf""" , device_map="""auto""" )
_UpperCAmelCase = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
_UpperCAmelCase = torch.tensor([[-6.6_5_5_0, -4.1_2_2_7, -4.9_8_5_9, -3.2_4_0_6, 0.8_2_6_2, -3.0_0_3_3, 1.2_9_6_4, -3.3_6_9_9]] )
torch.testing.assert_close(out.mean(-1 ) , __UpperCamelCase , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
_UpperCAmelCase = torch.tensor([-1_2.8_2_8_1, -7.4_4_5_3, -0.4_6_3_9, -8.0_6_2_5, -7.2_5_0_0, -8.0_0_0_0, -6.4_8_8_3, -7.7_6_9_5, -7.8_4_3_8, -7.0_3_1_2, -6.2_1_8_8, -7.1_3_2_8, -1.8_4_9_6, 1.9_9_6_1, -8.6_2_5_0, -6.7_2_2_7, -1_2.8_2_8_1, -6.9_4_9_2, -7.0_7_4_2, -7.7_8_5_2, -7.5_8_2_0, -7.9_0_6_2, -6.9_3_7_5, -7.9_8_0_5, -8.3_4_3_8, -8.1_5_6_2, -8.0_4_6_9, -7.6_2_5_0, -7.7_4_2_2, -7.3_3_9_8,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , __UpperCamelCase , atol=1e-5 , rtol=1e-5 )
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
def _snake_case ( self : Optional[int] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
_UpperCAmelCase = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-hf""" , device_map="""auto""" )
_UpperCAmelCase = model(torch.tensor(__UpperCamelCase ) )
# Expected mean on dim = -1
_UpperCAmelCase = torch.tensor([[-2.0_6_2_2, -1.2_7_9_4, -1.1_6_3_8, -0.9_7_8_8, -1.4_6_0_3, -1.0_2_3_8, -1.7_8_9_3, -1.4_4_1_1]] )
torch.testing.assert_close(out.mean(-1 ) , __UpperCamelCase , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
_UpperCAmelCase = torch.tensor([-8.1_4_0_6, -8.0_5_4_7, 2.7_4_6_1, -1.2_3_4_4, -0.1_4_4_8, -1.8_2_6_2, -1.0_0_2_0, -1.8_1_5_4, -1.6_8_9_5, -1.8_5_1_6, -2.3_5_7_4, -0.9_2_7_7, 3.7_5_9_8, 6.5_7_4_2, -1.2_9_9_8, -0.1_1_7_7, -8.1_4_0_6, -2.9_6_8_8, -2.9_1_9_9, -3.1_6_9_9, -3.5_2_5_4, -2.3_5_5_5, -2.7_9_8_8, -3.4_1_4_1, -2.8_2_6_2, -4.5_1_9_5, -3.3_3_7_9, -3.3_1_6_4, -2.7_8_3_2, -3.0_2_7_3] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , __UpperCamelCase , atol=1e-5 , rtol=1e-5 )
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
def _snake_case ( self : Dict ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
_UpperCAmelCase = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" , device_map="""auto""" )
_UpperCAmelCase = model(torch.tensor(__UpperCamelCase ) )
# Expected mean on dim = -1
_UpperCAmelCase = torch.tensor([[-0.8_5_6_2, -1.8_5_2_0, -0.7_5_5_1, -0.4_1_6_2, -1.5_1_6_1, -1.2_0_3_8, -2.4_8_2_3, -2.3_2_5_4]] )
torch.testing.assert_close(out.mean(-1 ) , __UpperCamelCase , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
_UpperCAmelCase = torch.tensor([-2.2_2_2_7, 4.8_8_2_8, 0.9_0_2_3, -0.4_5_7_8, -0.7_8_7_1, -0.1_0_3_3, -0.6_2_2_1, -0.5_7_8_6, -0.7_8_0_3, -1.0_6_7_4, -1.2_9_2_0, -0.1_5_7_0, 0.8_0_0_8, 2.0_7_2_3, -0.9_4_9_7, 0.2_7_7_1, -2.2_2_2_7, -0.7_6_1_2, -1.4_3_4_6, -1.2_0_6_1, -1.6_4_2_6, -0.3_0_0_0, -0.7_1_3_9, -1.1_9_3_4, -1.8_6_9_1, -1.6_9_7_3, -1.5_9_4_7, -1.2_7_0_5, -0.3_5_2_3, -0.5_5_1_3] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) , __UpperCamelCase , atol=1e-2 , rtol=1e-2 )
@unittest.skip(
"""Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test""" )
@slow
def _snake_case ( self : str ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
_UpperCAmelCase = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-70b-hf""" , device_map="""auto""" )
_UpperCAmelCase = model(torch.tensor(__UpperCamelCase ) )
_UpperCAmelCase = torch.tensor(
[[-4.2_3_2_7, -3.3_3_6_0, -4.6_6_6_5, -4.7_6_3_1, -1.8_1_8_0, -3.4_1_7_0, -1.4_2_1_1, -3.1_8_1_0]] , dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) , __UpperCamelCase , atol=1e-2 , rtol=1e-2 )
# fmt: off
_UpperCAmelCase = torch.tensor([-9.4_9_2_2, -3.9_5_5_1, 1.7_9_9_8, -5.6_7_5_8, -5.1_0_5_5, -5.8_9_8_4, -4.8_3_2_0, -6.8_0_8_6, -6.5_3_9_1, -5.6_1_7_2, -5.5_8_2_0, -5.5_3_5_2, 1.7_8_8_1, 3.6_2_8_9, -6.5_1_1_7, -3.4_7_8_5, -9.5_0_0_0, -6.0_3_5_2, -6.8_1_2_5, -6.0_1_9_5, -6.6_8_3_6, -5.4_7_2_7, -6.2_8_1_2, -6.0_3_9_1, -7.3_3_9_8, -7.4_2_9_7, -7.4_8_4_4, -6.5_8_2_0, -5.8_7_8_9, -5.5_3_1_2] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , __UpperCamelCase , atol=1e-5 , rtol=1e-5 )
@unittest.skip("""Model is curently gated""" )
@slow
def _snake_case ( self : Tuple ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase = """Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi"""
_UpperCAmelCase = """Simply put, the theory of relativity states that """
_UpperCAmelCase = LlamaTokenizer.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" )
_UpperCAmelCase = tokenizer.encode(__UpperCamelCase , return_tensors="""pt""" )
_UpperCAmelCase = LlamaForCausalLM.from_pretrained(
"""meta-llama/Llama-2-13b-chat-hf""" , device_map="""sequential""" , use_safetensors=__UpperCamelCase )
# greedy generation outputs
_UpperCAmelCase = model.generate(__UpperCamelCase , max_new_tokens=64 , top_p=__UpperCamelCase , temperature=1 , do_sample=__UpperCamelCase )
_UpperCAmelCase = tokenizer.decode(generated_ids[0] , skip_special_tokens=__UpperCamelCase )
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
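# Added note (sketch): the scaling test above drives RoPE scaling through a
# config dict of the form {"type": "linear" | "dynamic", "factor": 10.0}
# (presumably config.rope_scaling in the un-obfuscated original). As the
# assertions show, "dynamic" leaves outputs for short inputs unchanged, while
# "linear" alters them even below the original max position embeddings.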
| 555
| 0
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class lowerCamelCase :
def __init__( self , a_ , ):
lowerCAmelCase : str = parent
lowerCAmelCase : int = 13
lowerCAmelCase : Optional[int] = 7
lowerCAmelCase : str = True
lowerCAmelCase : Optional[int] = True
lowerCAmelCase : List[str] = True
lowerCAmelCase : Optional[Any] = True
lowerCAmelCase : Union[str, Any] = True
lowerCAmelCase : Any = False
lowerCAmelCase : Any = False
lowerCAmelCase : Dict = False
lowerCAmelCase : int = 2
lowerCAmelCase : Optional[int] = 99
lowerCAmelCase : Optional[int] = 0
lowerCAmelCase : Optional[int] = 32
lowerCAmelCase : Optional[int] = 2
lowerCAmelCase : Dict = 4
lowerCAmelCase : List[str] = 0.1
lowerCAmelCase : str = 0.1
lowerCAmelCase : Dict = 512
lowerCAmelCase : Union[str, Any] = 16
lowerCAmelCase : Dict = 2
lowerCAmelCase : Optional[Any] = 0.02
lowerCAmelCase : str = 3
lowerCAmelCase : Optional[int] = 4
lowerCAmelCase : List[str] = "last"
lowerCAmelCase : List[str] = True
lowerCAmelCase : List[Any] = None
lowerCAmelCase : Optional[int] = 0
def _lowerCamelCase ( self ):
lowerCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase : Any = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa )
lowerCAmelCase : Tuple = None
if self.use_input_lengths:
lowerCAmelCase : Any = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
lowerCAmelCase : str = None
if self.use_token_type_ids:
lowerCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
lowerCAmelCase : str = None
lowerCAmelCase : Union[str, Any] = None
lowerCAmelCase : List[str] = None
if self.use_labels:
lowerCAmelCase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase : List[Any] = ids_tensor([self.batch_size] , 2 , dtype=tf.floataa )
lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase : Optional[Any] = FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _lowerCamelCase ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ):
lowerCAmelCase : List[Any] = TFFlaubertModel(config=a_ )
lowerCAmelCase : Optional[Any] = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
lowerCAmelCase : List[Any] = model(a_ )
lowerCAmelCase : int = [input_ids, input_mask]
lowerCAmelCase : Optional[Any] = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ):
lowerCAmelCase : int = TFFlaubertWithLMHeadModel(a_ )
lowerCAmelCase : str = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
lowerCAmelCase : List[str] = model(a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ):
lowerCAmelCase : Optional[Any] = TFFlaubertForQuestionAnsweringSimple(a_ )
lowerCAmelCase : List[str] = {"input_ids": input_ids, "lengths": input_lengths}
lowerCAmelCase : Union[str, Any] = model(a_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowerCamelCase ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ):
lowerCAmelCase : Any = TFFlaubertForSequenceClassification(a_ )
lowerCAmelCase : List[str] = {"input_ids": input_ids, "lengths": input_lengths}
lowerCAmelCase : str = model(a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _lowerCamelCase ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ):
lowerCAmelCase : Tuple = self.num_labels
lowerCAmelCase : Tuple = TFFlaubertForTokenClassification(config=a_ )
lowerCAmelCase : str = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowerCAmelCase : Optional[int] = model(a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCamelCase ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ):
lowerCAmelCase : Any = self.num_choices
lowerCAmelCase : Tuple = TFFlaubertForMultipleChoice(config=a_ )
lowerCAmelCase : Optional[int] = tf.tile(tf.expand_dims(a_ , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase : Optional[Any] = tf.tile(tf.expand_dims(a_ , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase : Optional[int] = tf.tile(tf.expand_dims(a_ , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase : Union[str, Any] = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
lowerCAmelCase : Union[str, Any] = model(a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowerCamelCase ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "langs": token_type_ids,
            "lengths": input_lengths,
        }
        return config, inputs_dict
@require_tf
class lowerCamelCase ( _A , _A , unittest.TestCase ):
snake_case_ = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
snake_case_ = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
snake_case_ = (
{
"feature-extraction": TFFlaubertModel,
"fill-mask": TFFlaubertWithLMHeadModel,
"question-answering": TFFlaubertForQuestionAnsweringSimple,
"text-classification": TFFlaubertForSequenceClassification,
"token-classification": TFFlaubertForTokenClassification,
"zero-shot": TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
snake_case_ = False
snake_case_ = False
def _lowerCamelCase ( self , a_ , a_ , a_ , a_ , a_ ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def setUp(self):
        self.model_tester = TFFlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFFlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
@require_sentencepiece
@require_tokenizers
class TFFlaubertModelIntegrationTest(unittest.TestCase):
@slow
    def test_output_embeds_base_model(self):
        model = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased")

        input_ids = tf.convert_to_tensor(
            [[0, 158, 735, 2_592, 1_424, 6_727, 82, 1]],
            dtype=tf.int32,
        )  # "J'aime flaubert !"

        output = model(input_ids)[0]
        expected_shape = tf.TensorShape((1, 8, 512))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
[
[
[-1.8768773, -1.566555, 0.27072418],
[-1.6920038, -0.5873505, 1.9329599],
[-2.9563985, -1.6993835, 1.7972052],
]
            ],
            dtype=tf.float32,
        )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
logger = logging.get_logger(__name__)
class ChineseCLIPFeatureExtractor(ChineseCLIPImageProcessor):
def __init__( self , *a_ , **a_ ):
        warnings.warn(
            "The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ChineseCLIPImageProcessor instead.",
            FutureWarning,
        )
super().__init__(*a_ , **a_ )
'''simple docstring'''
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
NLTK_VERSION = version.parse(importlib_metadata.version('''nltk'''))
if NLTK_VERSION >= version.Version('''3.6.4'''):
from nltk import word_tokenize
_CITATION = '''\
@inproceedings{banarjee2005,
title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},
author = {Banerjee, Satanjeev and Lavie, Alon},
booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},
month = jun,
year = {2005},
address = {Ann Arbor, Michigan},
publisher = {Association for Computational Linguistics},
url = {https://www.aclweb.org/anthology/W05-0909},
pages = {65--72},
}
'''
_DESCRIPTION = '''\
METEOR, an automatic metric for machine translation evaluation
that is based on a generalized concept of unigram matching between the
machine-produced translation and human-produced reference translations.
Unigrams can be matched based on their surface forms, stemmed forms,
and meanings; furthermore, METEOR can be easily extended to include more
advanced matching strategies. Once all generalized unigram matches
between the two strings have been found, METEOR computes a score for
this matching using a combination of unigram-precision, unigram-recall, and
a measure of fragmentation that is designed to directly capture how
well-ordered the matched words in the machine translation are in relation
to the reference.
METEOR gets an R correlation value of 0.347 with human evaluation on the Arabic
data and 0.331 on the Chinese data. This is shown to be an improvement on
using simply unigram-precision, unigram-recall and their harmonic F1
combination.
'''
_KWARGS_DESCRIPTION = '''
Computes METEOR score of translated segments against one or more references.
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
alpha: Parameter for controlling relative weights of precision and recall. default: 0.9
beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3
gamma: Relative weight assigned to fragmentation penalty. default: 0.5
Returns:
\'meteor\': meteor score.
Examples:
>>> meteor = datasets.load_metric(\'meteor\')
>>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]
>>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]
>>> results = meteor.compute(predictions=predictions, references=references)
>>> print(round(results["meteor"], 4))
0.6944
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Meteor(datasets.Metric):
'''simple docstring'''
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': datasets.Value('string' ,id='sequence' ),
'references': datasets.Value('string' ,id='sequence' ),
} ) ,codebase_urls=['https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'] ,reference_urls=[
'https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score',
'https://en.wikipedia.org/wiki/METEOR',
] ,)
    def _download_and_prepare(self, dl_manager):
import nltk
nltk.download('wordnet' )
if NLTK_VERSION >= version.Version('3.6.5' ):
nltk.download('punkt' )
if NLTK_VERSION >= version.Version('3.6.6' ):
nltk.download('omw-1.4' )
    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        if NLTK_VERSION >= version.Version('3.6.5'):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref), word_tokenize(pred), alpha=alpha, beta=beta, gamma=gamma
                )
                for ref, pred in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref, pred, alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]

        return {"meteor": np.mean(scores)}
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/vivit-b-16x2-kinetics400''': (
'''https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'''
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class VivitConfig(PretrainedConfig):
    """simple docstring"""

    model_type = 'vivit'

    def __init__(
        self,
        image_size=2_24,
        num_frames=32,
        tubelet_size=[2, 16, 16],
        num_channels=3,
        hidden_size=7_68,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=30_72,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1E-06,
        qkv_bias=True,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs)
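# A minimal, hypothetical smoke test for the config above (not part of the original file):
# it builds a config with defaults and round-trips it through a dict; both `to_dict` and
# `from_dict` are inherited from PretrainedConfig.
#
#   config = VivitConfig()
#   assert VivitConfig.from_dict(config.to_dict()).hidden_size == 7_68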
'''simple docstring'''
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
    """simple docstring"""

    root_marker = ''
    protocol = 'hf-legacy'  # "hf://"" is reserved for hffs

    def __init__(self, repo_info: Optional[DatasetInfo] = None, token: Optional[str] = None, **kwargs):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    """name""": hf_file.rfilename,
                    """size""": None,
                    """type""": """file""",
                }
                self.dir_cache.update(
                    {
                        str(d): {"""name""": str(d), """size""": None, """type""": """directory"""}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(self, path, mode="rb", **kwargs):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(F"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"""trust_env""": True},
        ).open()

    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("""/"""))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("""/"""))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["""name"""] for f in out)
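# A minimal usage sketch (the dataset name is hypothetical; `dataset_info` comes from
# huggingface_hub.HfApi and returns the DatasetInfo this filesystem expects):
#
#   from huggingface_hub import HfApi
#
#   fs = HfFileSystem(repo_info=HfApi().dataset_info("user/some-dataset"))
#   print(fs.ls(""))                      # top-level files and directories
#   with fs.open("data/train.csv") as f:  # streamed over HTTP via fsspec
#       head = f.read(1024)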
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result, args):
    log_outputs = args.log_outputs
    dataset_id = '_'.join(args.dataset.split('/') + [args.config, args.split])

    # load metric
    wer = load_metric('wer')
    cer = load_metric('cer')

    # compute metrics
    wer_result = wer.compute(references=result['target'], predictions=result['prediction'])
    cer_result = cer.compute(references=result['target'], predictions=result['prediction'])

    # print & log results
    result_str = F"""WER: {wer_result}\nCER: {cer_result}"""
    print(result_str)

    with open(F"""{dataset_id}_eval_results.txt""", 'w') as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = F"""log_{dataset_id}_predictions.txt"""
        target_file = F"""log_{dataset_id}_targets.txt"""

        with open(pred_file, 'w') as p, open(target_file, 'w') as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(F"""{i}""" + '\n')
                p.write(batch['prediction'] + '\n')
                t.write(F"""{i}""" + '\n')
                t.write(batch['target'] + '\n')

            result.map(write_to_file, with_indices=True)
def normalize_text(text):
    chars_to_ignore_regex = '[,?.!\-\;\:\"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex, '', text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ['\n\n', '\n', '   ', '  ']
    for t in token_sequences_to_ignore:
        text = ' '.join(text.split(t))

    return text
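# For example, normalize_text("Hello, World!") returns "hello world": the text is
# lower-cased and the comma and exclamation mark are stripped by the regex above.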
def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column('audio', Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline('automatic-speech-recognition', model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch['audio']['array'], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s)

        batch['prediction'] = prediction['text']
        batch['target'] = normalize_text(batch['sentence'])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers"""
)
parser.add_argument(
"""--dataset""",
type=str,
required=True,
help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""",
)
parser.add_argument(
"""--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `'en'` for Common Voice"""
)
parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""")
parser.add_argument(
"""--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds."""
)
parser.add_argument(
"""--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second."""
)
parser.add_argument(
"""--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis."""
)
parser.add_argument(
"""--device""",
type=int,
default=None,
help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""",
)
    args = parser.parse_args()
main(args)
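# Example invocation (the script filename and the model identifier are illustrative only;
# the dataset flags follow the argparse definitions above):
#   python eval.py --model_id <model> --dataset mozilla-foundation/common_voice_8_0 --config en --split test --log_outputs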
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""vocab_file"""])
        with open(self.vocab_file, """w""", encoding="""utf-8""") as fp:
            fp.write(json.dumps(vocab_tokens) + """\n""")
    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = """tester"""
        output_text = """tester"""
return input_text, output_text
@unittest.skip("""MGP-STR always lower cases letters.""" )
    def test_added_tokens_do_lower_case(self):
pass
    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}"""):
                special_token = """[SPECIAL_TOKEN]"""

                tokenizer.add_special_tokens({"""cls_token""": special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)
    def test_internal_consistency(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}"""):
                input_text, output_text = self.get_input_output_texts(tokenizer)

                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_a = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_a)

                tokens_a = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_a), 0)
                text_a = tokenizer.decode(ids)
                self.assertIsInstance(text_a, str)

                self.assertEqual(text_a.replace(""" """, """"""), output_text)
@unittest.skip("""MGP-STR tokenizer only handles one sequence.""" )
    def test_maximum_encoding_length_pair_input(self):
pass
@unittest.skip("""inputs cannot be pretokenized in MgpstrTokenizer""" )
    def test_pretokenized_inputs(self):
pass
'''simple docstring'''
import os
# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
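# A word's value is the sum of the alphabetical positions of its letters, e.g.
# "SKY" -> 19 + 11 + 25 = 55 = t(10), so "SKY" counts as a triangle word.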
def solution() -> int:
    '''simple docstring'''
    script_directory = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_directory, "words.txt")

    words = ""
    with open(words_file_path) as f:
        words = f.readline()

    words = [word.strip("\"") for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)
if __name__ == "__main__":
print(solution())
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification
def get_swinva_config(swinva_name):
    '''simple docstring'''
    config = SwinvaConfig()
    name_split = swinva_name.split("_")

    model_size = name_split[1]
    if "to" in name_split[3]:
        img_size = int(name_split[3][-3:])
    else:
        img_size = int(name_split[3])
    if "to" in name_split[2]:
        window_size = int(name_split[2][-2:])
    else:
        window_size = int(name_split[2][6:])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "to" in swinva_name:
        config.pretrained_window_sizes = (12, 12, 12, 6)

    if ("22k" in swinva_name) and ("to" not in swinva_name):
        num_classes = 2_1841
        repo_id = "huggingface/label-files"
        filename = "imagenet-22k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        num_classes = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config
def rename_key(name):
    '''simple docstring'''
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swinv2." + name
    return name
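# For example, rename_key("layers.0.blocks.1.attn.proj.weight") returns
# "swinv2.encoder.layers.0.blocks.1.attention.output.dense.weight".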
def convert_state_dict(orig_state_dict, model):
    '''simple docstring'''
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            # The fused qkv projection is split into equal thirds for query, key and value.
            dim = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    F"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    F"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    F"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    F"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    F"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    F"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_swinva_checkpoint(swinva_name, pytorch_dump_folder_path):
    '''simple docstring'''
    timm_model = timm.create_model(swinva_name, pretrained=True)
    timm_model.eval()

    config = get_swinva_config(swinva_name)
    model = SwinvaForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swinva_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1E-3)

    print(f'Saving model {swinva_name} to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)

    print(f'Saving image processor to {pytorch_dump_folder_path}')
    image_processor.save_pretrained(pytorch_dump_folder_path)

    model.push_to_hub(
        repo_path_or_name=Path(pytorch_dump_folder_path, swinva_name), organization="nandwalritik", commit_message="Add model", )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swinv2_name''',
default='''swinv2_tiny_patch4_window8_256''',
type=str,
help='''Name of the Swinv2 timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
    convert_swinva_checkpoint(args.swinv2_name, args.pytorch_dump_folder_path)
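# Example invocation (the script filename and the dump path are illustrative only; the
# checkpoint name matches the argparse default above):
#   python convert_swinv2_timm_to_pytorch.py --swinv2_name swinv2_tiny_patch4_window8_256 --pytorch_dump_folder_path ./swinv2-tiny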
def get_data(source_data: list[list[float]]) -> list[list[float]]:
    """simple docstring"""
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists


def calculate_each_score(data_lists: list[list[float]], weights: list[int]) -> list[list[float]]:
    """simple docstring"""
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)

        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)

        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)

        # weight not 0 or 1
        else:
            msg = F"""Invalid weight of {weight:f} provided"""
            raise ValueError(msg)

        score_lists.append(score)

    return score_lists


def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    """simple docstring"""
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]

    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele

    return final_scores


def procentual_proximity(source_data: list[list[float]], weights: list[int]) -> list[list[float]]:
    """simple docstring"""
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)

    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)

    return source_data
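# A small worked sketch of the pipeline above (the numbers are illustrative): three
# alternatives scored on two criteria, where weight 0 treats the first criterion as a
# cost (lower is better) and weight 1 treats the second as a benefit.
if __name__ == "__main__":
    print(procentual_proximity([[20, 60], [23, 90], [22, 50]], [0, 1]))
    # -> [[20, 60, 1.25], [23, 90, 1.0], [22, 50, 0.3333...]] (final score appended per row)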
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_clap": [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapAudioConfig",
"ClapConfig",
"ClapTextConfig",
],
"processing_clap": ["ClapProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clap"] = [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapModel",
"ClapPreTrainedModel",
"ClapTextModel",
"ClapTextModelWithProjection",
"ClapAudioModel",
"ClapAudioModelWithProjection",
]
SCREAMING_SNAKE_CASE__ = ["ClapFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
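# Note: the _LazyModule indirection above defers the torch-dependent imports declared in
# _import_structure until the corresponding attribute (e.g. ClapModel) is first accessed.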
'''simple docstring'''
import warnings
from functools import wraps
from typing import Callable
def experimental(fn: Callable) -> Callable:
    @wraps(fn)
    def _inner_fn(*args, **kwargs):
        warnings.warn(
            (F'\'{fn.__name__}\' is experimental and might be subject to breaking changes in the future.'),
            UserWarning,
        )
        return fn(*args, **kwargs)

    return _inner_fn
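# Minimal usage sketch (the decorated function name is hypothetical):
#
#   @experimental
#   def new_api():
#       ...
#
# Calling new_api() then emits a UserWarning naming the function before it runs.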
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_12,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        """simple docstring"""
        return LlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """simple docstring"""
        model = LlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ):
"""simple docstring"""
_snake_case = True
_snake_case = LlamaModel(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_snake_case = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , encoder_attention_mask=lowerCAmelCase_ , )
_snake_case = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , )
_snake_case = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ):
"""simple docstring"""
_snake_case = LlamaForCausalLM(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_snake_case = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ):
"""simple docstring"""
_snake_case = True
_snake_case = True
_snake_case = LlamaForCausalLM(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
# first forward pass
_snake_case = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , encoder_attention_mask=lowerCAmelCase_ , use_cache=lowerCAmelCase_ , )
_snake_case = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
_snake_case = ids_tensor((self.batch_size, 3) , config.vocab_size )
_snake_case = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
_snake_case = torch.cat([input_ids, next_tokens] , dim=-1 )
_snake_case = torch.cat([input_mask, next_mask] , dim=-1 )
_snake_case = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , encoder_attention_mask=lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ , )['hidden_states'][0]
_snake_case = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , encoder_attention_mask=lowerCAmelCase_ , past_key_values=lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ , )['hidden_states'][0]
# select random slice
_snake_case = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_snake_case = output_from_no_past[:, -3:, random_slice_idx].detach()
_snake_case = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1E-3 ) )
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
{
"""feature-extraction""": LlamaModel,
"""text-classification""": LlamaForSequenceClassification,
"""text-generation""": LlamaForCausalLM,
"""zero-shot""": LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_headmasking = False
    test_pruning = False
    def setUp(self):
        """simple docstring"""
        self.model_tester = LlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37)

    def test_config(self):
        """simple docstring"""
        self.config_tester.run_common_tests()

    def test_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case = 3
_snake_case = input_dict['input_ids']
_snake_case = input_ids.ne(1 ).to(lowerCAmelCase_ )
_snake_case = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
_snake_case = LlamaForSequenceClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_snake_case = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case = 3
_snake_case = 'single_label_classification'
_snake_case = input_dict['input_ids']
_snake_case = input_ids.ne(1 ).to(lowerCAmelCase_ )
_snake_case = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
_snake_case = LlamaForSequenceClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_snake_case = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case = 3
_snake_case = 'multi_label_classification'
_snake_case = input_dict['input_ids']
_snake_case = input_ids.ne(1 ).to(lowerCAmelCase_ )
_snake_case = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
_snake_case = LlamaForSequenceClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_snake_case = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('LLaMA buffers include complex numbers, which breaks this test' )
def lowerCamelCase ( self ):
"""simple docstring"""
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case = ids_tensor([1, 10] , config.vocab_size )
_snake_case = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
_snake_case = LlamaModel(lowerCAmelCase_ )
original_model.to(lowerCAmelCase_ )
original_model.eval()
_snake_case = original_model(lowerCAmelCase_ ).last_hidden_state
_snake_case = original_model(lowerCAmelCase_ ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
_snake_case = {'type': scaling_type, 'factor': 10.0}
_snake_case = LlamaModel(lowerCAmelCase_ )
scaled_model.to(lowerCAmelCase_ )
scaled_model.eval()
_snake_case = scaled_model(lowerCAmelCase_ ).last_hidden_state
_snake_case = scaled_model(lowerCAmelCase_ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1E-5 ) )
@require_torch
class LlamaIntegrationTest(unittest.TestCase):
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!')
@slow
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
_snake_case = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' , device_map='auto' )
_snake_case = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
_snake_case = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] )
torch.testing.assert_close(out.mean(-1 ) , lowerCAmelCase_ , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
_snake_case = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , lowerCAmelCase_ , atol=1E-5 , rtol=1E-5 )
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!')
@slow
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
_snake_case = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' , device_map='auto' )
_snake_case = model(torch.tensor(lowerCAmelCase_ ) )
# Expected mean on dim = -1
_snake_case = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] )
torch.testing.assert_close(out.mean(-1 ) , lowerCAmelCase_ , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
_snake_case = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , lowerCAmelCase_ , atol=1E-5 , rtol=1E-5 )
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!')
@slow
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
_snake_case = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' , device_map='auto' )
_snake_case = model(torch.tensor(lowerCAmelCase_ ) )
# Expected mean on dim = -1
_snake_case = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] )
torch.testing.assert_close(out.mean(-1 ) , lowerCAmelCase_ , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
_snake_case = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) , lowerCAmelCase_ , atol=1E-2 , rtol=1E-2 )
@unittest.skip(
        'Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test')
@slow
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
_snake_case = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' , device_map='auto' )
_snake_case = model(torch.tensor(lowerCAmelCase_ ) )
_snake_case = torch.tensor(
            [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]], dtype=torch.float32)
torch.testing.assert_close(out.mean(-1 ) , lowerCAmelCase_ , atol=1E-2 , rtol=1E-2 )
# fmt: off
_snake_case = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , lowerCAmelCase_ , atol=1E-5 , rtol=1E-5 )
    @unittest.skip('Model is currently gated')
@slow
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
_snake_case = 'Simply put, the theory of relativity states that '
_snake_case = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' )
_snake_case = tokenizer.encode(lowerCAmelCase_ , return_tensors='pt' )
_snake_case = LlamaForCausalLM.from_pretrained(
'meta-llama/Llama-2-13b-chat-hf' , device_map='sequential' , use_safetensors=lowerCAmelCase_ )
# greedy generation outputs
_snake_case = model.generate(lowerCAmelCase_ , max_new_tokens=64 , top_p=lowerCAmelCase_ , temperature=1 , do_sample=lowerCAmelCase_ )
_snake_case = tokenizer.decode(generated_ids[0] , skip_special_tokens=lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
"""simple docstring"""
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr
    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
"""simple docstring"""
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None
    def setUp(self):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self)
@property
    def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
def __A ( self : Tuple ):
# with apply_OCR = True
A_ = LayoutLMvaImageProcessor()
from datasets import load_dataset
A_ = load_dataset("hf-internal-testing/fixtures_docvqa" , split="test" )
A_ = Image.open(ds[0]["file"] ).convert("RGB" )
A_ = image_processing(UpperCAmelCase , return_tensors="pt" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
A_ = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231
A_ = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 
801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , UpperCAmelCase )
self.assertListEqual(encoding.boxes , UpperCAmelCase )
# with apply_OCR = False
A_ = LayoutLMvaImageProcessor(apply_ocr=UpperCAmelCase )
A_ = image_processing(UpperCAmelCase , return_tensors="pt" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 86
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.upsample.0': 'encoder.upsample.projection',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'layer_norm',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
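# A sketch of how the wildcard entries above get resolved (illustrative key, not taken
# from a real checkpoint): the layer index is read out of the fairseq parameter name and
# substituted for "*" in the mapped key, e.g.
#   "encoder.layers.3.self_attn.k_proj.weight" -> "encoder.layers.3.attention.k_proj"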
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    '''simple docstring'''
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
        f""" {value.shape} for {full_name}"""
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    '''simple docstring'''
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group", )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"""Unused weights: {unused_weights}""")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    '''simple docstring'''
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"""Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
    else:
        unused_weights.append(full_name)
def convert_config(model, is_finetuned):
    '''simple docstring'''
    config = SEWConfig()
    if is_finetuned:
        fs_config = model.w2v_encoder.w2v_model.cfg
    else:
        fs_config = model.cfg
    config.conv_bias = fs_config.conv_bias
    conv_layers = eval(fs_config.conv_feature_layers)
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = "gelu"
    config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1e-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers)
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor
    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
    config.activation_dropout = fs_config.activation_dropout
    config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
    config.attention_dropout = fs_config.attention_dropout
    config.feat_proj_dropout = fs_config.dropout_input
    config.hidden_dropout = fs_config.dropout
    config.mask_feature_length = fs_config.mask_channel_length
    config.mask_feature_prob = fs_config.mask_channel_prob
    config.mask_time_length = fs_config.mask_length
    config.mask_time_prob = fs_config.mask_prob
    config.feature_extractor_type = "Wav2Vec2FeatureExtractor"
    config.tokenizer_class = "Wav2Vec2CTCTokenizer"
    return config
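# Note: fairseq stores conv_feature_layers as a string such as
# "[(512, 10, 5), (512, 3, 2), (512, 3, 2)]" (values here are illustrative), which is
# why eval() above is used to recover the list of (dim, kernel, stride) tuples.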
@torch.no_grad()
def convert_sew_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    '''simple docstring'''
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])})
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])
    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path)
    else:
        config = convert_config(model[0], is_finetuned)
    model = model[0].eval()
    return_attention_mask = True if config.feat_extract_norm == "layer" else False
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask, )
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="|", do_lower_case=False, )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_model = SEWForCTC(config)
    else:
        hf_model = SEWModel(config)
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    recursively_load_weights(model, hf_model, is_finetuned)
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--is_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
    args = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
| 625
| 0
|
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob(text: str):
    """simple docstring"""
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())
    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.
    # print entropy
    print(f"{round(-1 * my_fir_sum):.1f}")
    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)
    # print second entropy
    print(f"{round(-1 * my_sec_sum):.1f}")
    # print the difference between them
    print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}")
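# The quantity printed above is the Shannon entropy H = -sum(p(c) * log2(p(c))).
# Tiny hand-worked example (assumed input, not from the source): for the text "aab",
# p(a) = 2/3 and p(b) = 1/3, so H = -(2/3)*log2(2/3) - (1/3)*log2(1/3) ~ 0.918 bits.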
def analyze_text(text: str):
    """simple docstring"""
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1
    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings
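# Illustrative call (hypothetical input): analyze_text("ab") returns
# Counter({'b': 1, 'a': 1}) for single characters and Counter({' a': 1, 'ab': 1})
# for the two-character strings, including the leading-space pair.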
def main():
    """simple docstring"""
    import doctest
    doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 720
|
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class _lowercase :
"""simple docstring"""
A__ = field(
default=lowercase__ , metadata={"help": "Model type selected in the list: " + ", ".join(lowercase__)})
A__ = field(
default=lowercase__ , metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."})
A__ = field(
default=1_28 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
A__ = field(
default=1_28 , metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."} , )
A__ = field(
default=64 , metadata={
"help": (
"The maximum number of tokens for the question. Questions longer than this will "
"be truncated to this length."
)
} , )
A__ = field(
default=30 , metadata={
"help": (
"The maximum length of an answer that can be generated. This is needed because the start "
"and end predictions are not conditioned on one another."
)
} , )
A__ = field(
default=lowercase__ , metadata={"help": "Overwrite the cached training and evaluation sets"})
A__ = field(
default=lowercase__ , metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."})
A__ = field(
default=0.0 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."})
A__ = field(
default=20 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."})
A__ = field(
default=0 , metadata={
"help": (
"language id of input for language-specific xlm models (see"
" tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
)
} , )
A__ = field(default=1 , metadata={"help": "multiple threads for converting example to features"})
class _lowercase ( lowercase__):
"""simple docstring"""
A__ = "train"
A__ = "dev"
class _lowercase ( lowercase__):
"""simple docstring"""
A__ = 42
A__ = 42
A__ = 42
A__ = 42
def __init__( self : Optional[int] , __lowerCamelCase : SquadDataTrainingArguments , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Union[str, Split] = Split.train , __lowerCamelCase : Optional[bool] = False , __lowerCamelCase : Optional[str] = None , __lowerCamelCase : Optional[str] = "pt" , ):
'''simple docstring'''
lowerCamelCase__ : List[str] = args
lowerCamelCase__ : Tuple = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
if isinstance(__lowerCamelCase , __lowerCamelCase ):
try:
lowerCamelCase__ : List[str] = Split[mode]
except KeyError:
raise KeyError("mode is not a valid split name" )
lowerCamelCase__ : str = mode
# Load data features from cache or dataset file
lowerCamelCase__ : Any = "v2" if args.version_2_with_negative else "v1"
lowerCamelCase__ : List[str] = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}" , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lowerCamelCase__ : List[str] = cached_features_file + ".lock"
with FileLock(__lowerCamelCase ):
if os.path.exists(__lowerCamelCase ) and not args.overwrite_cache:
lowerCamelCase__ : str = time.time()
lowerCamelCase__ : Tuple = torch.load(__lowerCamelCase )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
lowerCamelCase__ : Optional[Any] = self.old_features["features"]
lowerCamelCase__ : Optional[int] = self.old_features.get("dataset" , __lowerCamelCase )
lowerCamelCase__ : Optional[Any] = self.old_features.get("examples" , __lowerCamelCase )
logger.info(
f"Loading features from cached file {cached_features_file} [took %.3f s]" , time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
" future run" )
else:
if mode == Split.dev:
lowerCamelCase__ : List[Any] = self.processor.get_dev_examples(args.data_dir )
else:
lowerCamelCase__ : str = self.processor.get_train_examples(args.data_dir )
lowerCamelCase__ , lowerCamelCase__ : Tuple = squad_convert_examples_to_features(
examples=self.examples , tokenizer=__lowerCamelCase , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=__lowerCamelCase , )
lowerCamelCase__ : int = time.time()
torch.save(
{"features": self.features, "dataset": self.dataset, "examples": self.examples} , __lowerCamelCase , )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]" )
def __len__( self : List[Any] ):
'''simple docstring'''
return len(self.features )
def __getitem__( self : List[str] , __lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.features[i]
lowerCamelCase__ : Tuple = torch.tensor(feature.input_ids , dtype=torch.long )
lowerCamelCase__ : List[Any] = torch.tensor(feature.attention_mask , dtype=torch.long )
lowerCamelCase__ : Tuple = torch.tensor(feature.token_type_ids , dtype=torch.long )
lowerCamelCase__ : Any = torch.tensor(feature.cls_index , dtype=torch.long )
lowerCamelCase__ : Any = torch.tensor(feature.p_mask , dtype=torch.float )
lowerCamelCase__ : Union[str, Any] = torch.tensor(feature.is_impossible , dtype=torch.float )
lowerCamelCase__ : List[str] = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"cls_index": cls_index, "p_mask": p_mask} )
if self.args.version_2_with_negative:
inputs.update({"is_impossible": is_impossible} )
if self.is_language_sensitive:
inputs.update({"langs": (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position , dtype=torch.long )
            end_positions = torch.tensor(feature.end_position , dtype=torch.long )
inputs.update({"start_positions": start_positions, "end_positions": end_positions} )
return inputs
| 5
| 0
|
"""simple docstring"""
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
dataset = pd.read_csv(
    'https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/'
    'position_salaries.csv'
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)
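# PolynomialFeatures(degree=4) expands each scalar level x into [1, x, x**2, x**3, x**4],
# so the LinearRegression above effectively fits a quartic polynomial, e.g.
#   poly_reg.fit_transform([[2]]) -> [[1., 2., 4., 8., 16.]]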
def viz_polymonial() -> None:
    '''simple docstring'''
    plt.scatter(X, y, color="""red""")
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color="""blue""")
    plt.title("""Truth or Bluff (Linear Regression)""")
    plt.xlabel("""Position level""")
    plt.ylabel("""Salary""")
    plt.show()
if __name__ == "__main__":
viz_polymonial()
    # Predicting a new result with Polynomial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
| 169
|
"""simple docstring"""
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a = logging.get_logger(__name__)
a = {
'snap-research/efficientformer-l1-300': (
'https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json'
),
}
class EfficientFormerConfig(PretrainedConfig):
    model_type = 'efficientformer'
    def __init__( self , depths : List[int] = [3, 2, 6, 4] , hidden_sizes : List[int] = [48, 96, 224, 448] , downsamples : List[bool] = [True, True, True, True] , dim : int = 448 , key_dim : int = 32 , attention_ratio : int = 4 , resolution : int = 7 , num_hidden_layers : int = 5 , num_attention_heads : int = 8 , mlp_expansion_ratio : int = 4 , hidden_dropout_prob : float = 0.0 , patch_size : int = 16 , num_channels : int = 3 , pool_size : int = 3 , downsample_patch_size : int = 3 , downsample_stride : int = 2 , downsample_pad : int = 1 , drop_path_rate : float = 0.0 , num_meta3d_blocks : int = 1 , distillation : bool = True , use_layer_scale : bool = True , layer_scale_init_value : float = 1e-5 , hidden_act : str = "gelu" , initializer_range : float = 0.02 , layer_norm_eps : float = 1e-12 , image_size : int = 224 , batch_norm_eps : float = 1e-05 , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
| 169
| 1
|
'''simple docstring'''
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str) -> str:
    """simple docstring"""
    return "".join(sorted(word))
def anagram(my_word: str) -> list[str]:
    """simple docstring"""
    return word_by_signature[signature(my_word)]
__lowerCamelCase : str = Path(__file__).parent.joinpath('''words.txt''').read_text(encoding='''utf-8''')
__lowerCamelCase : Optional[int] = sorted({word.strip().lower() for word in data.splitlines()})
__lowerCamelCase : Optional[int] = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
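# Illustrative grouping (hypothetical entries): "post", "stop" and "tops" all share the
# signature "opst", so they land in the same word_by_signature bucket and are anagrams.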
if __name__ == "__main__":
__lowerCamelCase : Any = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open('''anagrams.txt''', '''w''') as file:
file.write('''all_anagrams = \n ''')
file.write(pprint.pformat(all_anagrams))
| 656
|
'''simple docstring'''
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class A_ (unittest.TestCase ):
"""simple docstring"""
def _A ( self :str ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Union[str, Any] = torch.nn.Linear(10 , 10 )
snake_case_ : Dict = torch.optim.SGD(model.parameters() , 0.1 )
snake_case_ : Tuple = Accelerator()
snake_case_ : Optional[Any] = accelerator.prepare(lowerCAmelCase__ )
try:
pickle.loads(pickle.dumps(lowerCAmelCase__ ) )
except Exception as e:
self.fail(F'''Accelerated optimizer pickling failed with {e}''' )
AcceleratorState._reset_state()
| 656
| 1
|
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class TransformerTemporalModelOutput(BaseOutput ):
    '''simple docstring'''
    sample: torch.FloatTensor
class TransformerTemporalModel(ModelMixin , ConfigMixin ):
'''simple docstring'''
@register_to_config
    def __init__( self , num_attention_heads = 16 , attention_head_dim = 88 , in_channels = None , out_channels = None , num_layers = 1 , dropout = 0.0 , norm_num_groups = 32 , cross_attention_dim = None , attention_bias = False , sample_size = None , activation_fn = "geglu" , norm_elementwise_affine = True , double_self_attention = True , ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.in_channels = in_channels
        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups , num_channels=in_channels , eps=1e-6 , affine=True )
        self.proj_in = nn.Linear(in_channels , inner_dim )
        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim , num_attention_heads , attention_head_dim , dropout=dropout , cross_attention_dim=cross_attention_dim , activation_fn=activation_fn , attention_bias=attention_bias , double_self_attention=double_self_attention , norm_elementwise_affine=norm_elementwise_affine , )
                for d in range(num_layers )
            ] )
        self.proj_out = nn.Linear(inner_dim , in_channels )
    def forward( self , hidden_states , encoder_hidden_states=None , timestep=None , class_labels=None , num_frames=1 , cross_attention_kwargs=None , return_dict = True , ):
        batch_frames , channel , height , width = hidden_states.shape
        batch_size = batch_frames // num_frames
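        # Shape bookkeeping (inferred from the reshapes below): hidden_states arrives as
        # (batch * num_frames, channel, height, width), is unflattened to
        # (batch, num_frames, channel, height, width), and is then permuted so attention
        # runs over the frame axis independently for every spatial location.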
        residual = hidden_states
        hidden_states = hidden_states[None, :].reshape(batch_size , num_frames , channel , height , width )
        hidden_states = hidden_states.permute(0 , 2 , 1 , 3 , 4 )
        hidden_states = self.norm(hidden_states )
        hidden_states = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , num_frames , channel )
        hidden_states = self.proj_in(hidden_states )
        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states , encoder_hidden_states=encoder_hidden_states , timestep=timestep , cross_attention_kwargs=cross_attention_kwargs , class_labels=class_labels , )
        # 3. Output
        hidden_states = self.proj_out(hidden_states )
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size , height , width , num_frames , channel )
            .permute(0 , 3 , 4 , 1 , 2 )
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames , channel , height , width )
        output = hidden_states + residual
        if not return_dict:
            return (output,)
        return TransformerTemporalModelOutput(sample=output )
| 90
|
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type="dataset", revision=revision)
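# Minimal usage sketch (illustrative repo and file names, not from the source):
#   url = hf_hub_url("user/my-dataset", "data/train.json", revision="main")
# resolves to the Hub URL of that dataset file at the given revision.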
| 327
| 0
|
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
    def setUp( self ):
        self.checkpoint = "ZinengTang/tvlt-base"
        self.tmpdirname = tempfile.mkdtemp()
    def get_image_processor( self , **kwargs ):
        return TvltImageProcessor.from_pretrained(self.checkpoint , **kwargs )
    def get_feature_extractor( self , **kwargs ):
        return TvltFeatureExtractor.from_pretrained(self.checkpoint , **kwargs )
    def tearDown( self ):
        shutil.rmtree(self.tmpdirname )
def snake_case__ ( self ) -> Any:
A__ = self.get_image_processor()
A__ = self.get_feature_extractor()
A__ = TvltProcessor(image_processor=_UpperCAmelCase , feature_extractor=_UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
A__ = TvltProcessor.from_pretrained(self.tmpdirname )
self.assertIsInstance(processor.feature_extractor , _UpperCAmelCase )
self.assertIsInstance(processor.image_processor , _UpperCAmelCase )
def snake_case__ ( self ) -> Tuple:
A__ = self.get_image_processor()
A__ = self.get_feature_extractor()
A__ = TvltProcessor(image_processor=_UpperCAmelCase , feature_extractor=_UpperCAmelCase )
A__ = np.ones([12000] )
A__ = feature_extractor(_UpperCAmelCase , return_tensors="np" )
A__ = processor(audio=_UpperCAmelCase , return_tensors="np" )
for key in audio_dict.keys():
self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
def snake_case__ ( self ) -> Any:
A__ = self.get_image_processor()
A__ = self.get_feature_extractor()
A__ = TvltProcessor(image_processor=_UpperCAmelCase , feature_extractor=_UpperCAmelCase )
A__ = np.ones([3, 224, 224] )
A__ = image_processor(_UpperCAmelCase , return_tensors="np" )
A__ = processor(images=_UpperCAmelCase , return_tensors="np" )
for key in image_dict.keys():
self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
def snake_case__ ( self ) -> List[Any]:
A__ = self.get_image_processor()
A__ = self.get_feature_extractor()
A__ = TvltProcessor(image_processor=_UpperCAmelCase , feature_extractor=_UpperCAmelCase )
A__ = np.ones([12000] )
A__ = np.ones([3, 224, 224] )
A__ = processor(audio=_UpperCAmelCase , images=_UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ["audio_values", "audio_mask", "pixel_values", "pixel_mask"] )
# test if it raises when no input is passed
with pytest.raises(_UpperCAmelCase ):
processor()
def snake_case__ ( self ) -> Optional[int]:
A__ = self.get_image_processor()
A__ = self.get_feature_extractor()
A__ = TvltProcessor(image_processor=_UpperCAmelCase , feature_extractor=_UpperCAmelCase )
self.assertListEqual(
processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg="`processor` and `image_processor`+`feature_extractor` model input names do not match" , )
| 714
|
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class UpperCamelCase__ ( _lowerCAmelCase ):
"""simple docstring"""
    def __init__( self , unet , scheduler ):
        super().__init__()
        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config )
        self.register_modules(unet=unet , scheduler=scheduler )
@torch.no_grad()
    def __call__( self , batch_size = 1 , generator = None , eta = 0.0 , num_inference_steps = 50 , use_clipped_model_output = None , output_type = "pil" , return_dict = True , ) -> Union[ImagePipelineOutput, Tuple]:
        # Sample gaussian noise to begin loop
        if isinstance(self.unet.config.sample_size , int ):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
        if isinstance(generator , list ) and len(generator ) != batch_size:
            raise ValueError(
                f"""You have passed a list of generators of length {len(generator )}, but requested an effective batch"""
                f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
        image = randn_tensor(image_shape , generator=generator , device=self.device , dtype=self.unet.dtype )
        # set step values
        self.scheduler.set_timesteps(num_inference_steps )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # 1. predict noise model_output
            model_output = self.unet(image , t ).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output , t , image , eta=eta , use_clipped_model_output=use_clipped_model_output , generator=generator ).prev_sample
        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
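# Minimal usage sketch of the unconditional DDIM pipeline above (the upstream class is
# normally exposed as DDIMPipeline; names here are assumed):
#   pipe = DDIMPipeline(unet=unet, scheduler=scheduler)
#   image = pipe(batch_size=1, num_inference_steps=50).images[0]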
| 562
| 0
|
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config , base_model=False ):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'module.blocks.{i}.norm1.weight', F'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'module.blocks.{i}.norm1.bias', F'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(F'module.blocks.{i}.attn.proj.weight', F'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((F'module.blocks.{i}.attn.proj.bias', F'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((F'module.blocks.{i}.norm2.weight', F'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'module.blocks.{i}.norm2.bias', F'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((F'module.blocks.{i}.mlp.fc1.weight', F'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((F'module.blocks.{i}.mlp.fc1.bias', F'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((F'module.blocks.{i}.mlp.fc2.weight', F'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'module.blocks.{i}.mlp.fc2.bias', F'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
('module.cls_token', 'vit.embeddings.cls_token'),
('module.patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('module.patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('module.pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('module.norm.weight', 'layernorm.weight'),
('module.norm.bias', 'layernorm.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
__lowerCamelCase : Optional[Any] = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def read_in_q_k_v(state_dict , config , base_model=False ):
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ''
        else:
            prefix = 'vit.'
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'module.blocks.{i}.attn.qkv.weight' )
        in_proj_bias = state_dict.pop(F'module.blocks.{i}.attn.qkv.bias' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.query.bias'] = in_proj_bias[: config.hidden_size]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.key.bias'] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.value.bias'] = in_proj_bias[-config.hidden_size :]
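# Shape sketch (hypothetical numbers): with hidden_size = 768 the fused qkv weight is
# (3 * 768, 768) = (2304, 768); the three slices above carve it into separate
# (768, 768) query, key and value projection matrices, in that order.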
def remove_classification_head_(state_dict ):
    ignore_keys = ['head.weight', 'head.bias']
    for k in ignore_keys:
        state_dict.pop(k , None )
def remove_projection_head(state_dict ):
    # projection head is used in the self-supervised pre-training in MSN,
    # for downstream task it's not needed.
    ignore_keys = [
'module.fc.fc1.weight',
'module.fc.fc1.bias',
'module.fc.bn1.weight',
'module.fc.bn1.bias',
'module.fc.bn1.running_mean',
'module.fc.bn1.running_var',
'module.fc.bn1.num_batches_tracked',
'module.fc.fc2.weight',
'module.fc.fc2.bias',
'module.fc.bn2.weight',
'module.fc.bn2.bias',
'module.fc.bn2.running_mean',
'module.fc.bn2.running_var',
'module.fc.bn2.num_batches_tracked',
'module.fc.fc3.weight',
'module.fc.fc3.bias',
]
for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key(dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def convert_vit_msn_checkpoint(checkpoint_url , pytorch_dump_folder_path ):
__lowerCamelCase : Dict = ViTMSNConfig()
__lowerCamelCase : Dict = 10_00
__lowerCamelCase : Optional[Any] = 'datasets/huggingface/label-files'
__lowerCamelCase : Tuple = 'imagenet-1k-id2label.json'
__lowerCamelCase : List[str] = json.load(open(hf_hub_download(UpperCAmelCase_ , UpperCAmelCase_ ) , 'r' ) )
__lowerCamelCase : Tuple = {int(UpperCAmelCase_ ): v for k, v in idalabel.items()}
__lowerCamelCase : Any = idalabel
__lowerCamelCase : str = {v: k for k, v in idalabel.items()}
if "s16" in checkpoint_url:
__lowerCamelCase : Union[str, Any] = 3_84
__lowerCamelCase : Tuple = 15_36
__lowerCamelCase : int = 6
elif "l16" in checkpoint_url:
__lowerCamelCase : Any = 10_24
__lowerCamelCase : int = 40_96
__lowerCamelCase : int = 24
__lowerCamelCase : Any = 16
__lowerCamelCase : List[str] = 0.1
elif "b4" in checkpoint_url:
__lowerCamelCase : Union[str, Any] = 4
elif "l7" in checkpoint_url:
__lowerCamelCase : str = 7
__lowerCamelCase : Union[str, Any] = 10_24
__lowerCamelCase : List[Any] = 40_96
__lowerCamelCase : Union[str, Any] = 24
__lowerCamelCase : Optional[Any] = 16
__lowerCamelCase : Optional[Any] = 0.1
__lowerCamelCase : Optional[Any] = ViTMSNModel(UpperCAmelCase_ )
__lowerCamelCase : int = torch.hub.load_state_dict_from_url(UpperCAmelCase_ , map_location='cpu' )['target_encoder']
__lowerCamelCase : List[Any] = ViTImageProcessor(size=config.image_size )
remove_projection_head(UpperCAmelCase_ )
__lowerCamelCase : List[Any] = create_rename_keys(UpperCAmelCase_ , base_model=UpperCAmelCase_ )
for src, dest in rename_keys:
rename_key(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
read_in_q_k_v(UpperCAmelCase_ , UpperCAmelCase_ , base_model=UpperCAmelCase_ )
model.load_state_dict(UpperCAmelCase_ )
model.eval()
__lowerCamelCase : Any = 'http://images.cocodataset.org/val2017/000000039769.jpg'
__lowerCamelCase : Optional[Any] = Image.open(requests.get(UpperCAmelCase_ , stream=UpperCAmelCase_ ).raw )
__lowerCamelCase : str = ViTImageProcessor(
size=config.image_size , image_mean=UpperCAmelCase_ , image_std=UpperCAmelCase_ )
__lowerCamelCase : Tuple = image_processor(images=UpperCAmelCase_ , return_tensors='pt' )
# forward pass
torch.manual_seed(2 )
__lowerCamelCase : int = model(**UpperCAmelCase_ )
__lowerCamelCase : Optional[int] = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
__lowerCamelCase : Optional[Any] = torch.tensor([[-1.0_915, -1.4_876, -1.1_809]] )
elif "b16" in checkpoint_url:
__lowerCamelCase : List[Any] = torch.tensor([[14.2_889, -18.9_045, 11.7_281]] )
elif "l16" in checkpoint_url:
__lowerCamelCase : str = torch.tensor([[41.5_028, -22.8_681, 45.6_475]] )
elif "b4" in checkpoint_url:
__lowerCamelCase : int = torch.tensor([[-4.3_868, 5.2_932, -0.4_137]] )
else:
__lowerCamelCase : Dict = torch.tensor([[-0.1_792, -0.6_465, 2.4_263]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] , UpperCAmelCase_ , atol=1e-4 )
print(F'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(UpperCAmelCase_ )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(UpperCAmelCase_ )
if __name__ == "__main__":
A__ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar""",
type=str,
help="""URL of the checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
A__ : int = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 13
|
"""simple docstring"""
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from tax import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
def UpperCAmelCase__ (lowerCAmelCase_ ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = checkpoints.load_tax_checkpoint(lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = flatten_dict(lowerCAmelCase_ )
return flax_params
def rename_and_convert_flax_params(flax_dict ):
    '''simple docstring'''
    converted_dict = {}
    CONVERSION_MAPPING = {
"token_embedder": "embeddings",
"encoder_norm": "layernorm",
"kernel": "weight",
".out": ".output",
"scale": "weight",
"embedders_0.pos_embedding": "row_embedder.weight",
"embedders_1.pos_embedding": "column_embedder.weight",
}
    DECODER_CONVERSION_MAPPING = {
"query": "attention.query",
"key": "attention.key",
"value": "attention.value",
"output.dense": "output",
"encoder_decoder_attention.o": "encoder_decoder_attention.attention.o",
"pre_self_attention_layer_norm": "self_attention.layer_norm",
"pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm",
"mlp.": "mlp.DenseReluDense.",
"pre_mlp_layer_norm": "mlp.layer_norm",
"self_attention.o": "self_attention.attention.o",
"decoder.embeddings.embedding": "decoder.embed_tokens.weight",
"decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight",
"decoder.decoder_norm.weight": "decoder.final_layer_norm.weight",
"decoder.logits_dense.weight": "decoder.lm_head.weight",
}
    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            new_key = ".".join(key[1:] )
            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old , new )
            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old , new )
            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(R"layers_(\d+)" , R"layer.\1" , new_key )
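                # e.g. (illustrative key, assumed for this sketch):
                # "encoder.layers_3.mlp.wi" becomes "encoder.layer.3.mlp.wi" here, and the
                # next line then doubles the prefix to "encoder.encoder.layer.3.mlp.wi".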
                new_key = new_key.replace("encoder" , "encoder.encoder" )
            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(R"layers_(\d+)" , R"layer.\1" , new_key )
            converted_dict[new_key] = flax_dict[key]
    converted_torch_dict = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T )
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key] )
    return converted_torch_dict
def convert_pixastruct_original_pytorch_checkpoint_to_hf(tax_checkpoint_path , pytorch_dump_folder_path , use_large=False , is_vqa=False ):
    '''simple docstring'''
__SCREAMING_SNAKE_CASE = get_flax_param(lowerCAmelCase_ )
if not use_large:
__SCREAMING_SNAKE_CASE = PixaStructVisionConfig()
__SCREAMING_SNAKE_CASE = PixaStructTextConfig()
else:
__SCREAMING_SNAKE_CASE = PixaStructVisionConfig(
hidden_size=1536 , d_ff=3968 , num_attention_heads=24 , num_hidden_layers=18 )
__SCREAMING_SNAKE_CASE = PixaStructTextConfig(hidden_size=1536 , d_ff=3968 , num_heads=24 , num_layers=18 )
__SCREAMING_SNAKE_CASE = PixaStructConfig(
vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = PixaStructForConditionalGeneration(lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = rename_and_convert_flax_params(lowerCAmelCase_ )
model.load_state_dict(lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer" )
__SCREAMING_SNAKE_CASE = PixaStructImageProcessor()
__SCREAMING_SNAKE_CASE = PixaStructProcessor(image_processor=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ )
if use_large:
__SCREAMING_SNAKE_CASE = 4096
__SCREAMING_SNAKE_CASE = True
# mkdir if needed
os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_ )
model.save_pretrained(lowerCAmelCase_ )
processor.save_pretrained(lowerCAmelCase_ )
print("Model saved in {}".format(lowerCAmelCase_ ) )
if __name__ == "__main__":
a__ : Optional[int] = argparse.ArgumentParser()
parser.add_argument('''--t5x_checkpoint_path''', default=None, type=str, help='''Path to the original T5x checkpoint.''')
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--use_large''', action='''store_true''', help='''Use large model.''')
parser.add_argument('''--is_vqa''', action='''store_true''', help='''Use large model.''')
a__ : Optional[Any] = parser.parse_args()
convert_pixastruct_original_pytorch_checkpoint_to_hf(
args.tax_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
)
| 682
| 0
|
class Graph:
    def __init__(self) -> None:
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}
    def add_vertex(self, vertex) -> None:
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1
    def add_edge(self, head, tail, weight) -> None:
        self.add_vertex(head)
        self.add_vertex(tail)
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight
    def distinct_weight(self) -> None:
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))
        for i in range(len(edges)):
            edges[i] = list(edges[i])
        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight
    def __str__(self) -> str:
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f'{head} -> {tail} == {weight}\n'
        return string.rstrip("\n")
    def get_edges(self) -> list:
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output
    def get_vertices(self):
        return self.adjacency.keys()
    @staticmethod
    def build(vertices=None, edges=None):
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g
    class UnionFind:
        def __init__(self) -> None:
            self.parent = {}
            self.rank = {}
        def __len__(self) -> int:
            return len(self.parent)
        def make_set(self, item):
            if item in self.parent:
                return self.find(item)
            self.parent[item] = item
            self.rank[item] = 0
            return item
        def find(self, item):
            if item not in self.parent:
                return self.make_set(item)
            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item])
            return self.parent[item]
        def union(self, item_a, item_b):
            root_a = self.find(item_a)
            root_b = self.find(item_b)
            if root_a == root_b:
                return root_a
            if self.rank[root_a] > self.rank[root_b]:
                self.parent[root_b] = root_a
                return root_a
            if self.rank[root_a] < self.rank[root_b]:
                self.parent[root_a] = root_b
                return root_b
            if self.rank[root_a] == self.rank[root_b]:
                self.rank[root_a] += 1
                self.parent[root_b] = root_a
                return root_a
            return None
    @staticmethod
    def boruvka_mst(graph):
        num_components = graph.num_vertices
        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1
            edges = graph.get_edges()
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge
                set_a = union_find.find(head)
                set_b = union_find.find(tail)
                if set_a != set_b:
                    if cheap_edge[set_a] == -1 or cheap_edge[set_a][2] > weight:
                        cheap_edge[set_a] = [head, tail, weight]
                    if cheap_edge[set_b] == -1 or cheap_edge[set_b][2] > weight:
                        cheap_edge[set_b] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)
                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges)
        return mst
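# Minimal usage sketch (hypothetical graph with distinct weights):
#   g = Graph.build(vertices=[1, 2, 3], edges=[(1, 2, 1), (2, 3, 2), (1, 3, 3)])
#   mst = Graph.boruvka_mst(g)   # keeps the edges with weights 1 and 2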
| 313
|
import numpy as np
def runge_kutta(f, ya, xa, h, x_end):
    n = int(np.ceil((x_end - xa) / h))
    y = np.zeros((n + 1,))
    y[0] = ya
    x = xa
    for k in range(n):
        ka = f(x, y[k])
        kb = f(x + 0.5 * h, y[k] + 0.5 * h * ka)
        kc = f(x + 0.5 * h, y[k] + 0.5 * h * kb)
        kd = f(x + h, y[k] + h * kc)
        y[k + 1] = y[k] + (1 / 6) * h * (ka + 2 * kb + 2 * kc + kd)
        x += h
    return y
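# Minimal usage sketch: integrating dy/dx = y from x = 0 to x = 1 with y(0) = 1
# should approximate e at the last grid point, e.g.
#   y = runge_kutta(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)   # y[-1] ~ 2.7182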
if __name__ == "__main__":
import doctest
doctest.testmod()
| 313
| 1
|
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class lowerCamelCase (_snake_case ):
'''simple docstring'''
    def __init__( self , params , data ):
        self.params = params
        self.token_ids = np.array(data )
        self.lengths = np.array([len(t ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self , _UpperCamelCase ) -> Optional[Any]:
return (self.token_ids[index], self.lengths[index])
def __len__( self ) -> str:
return len(self.lengths )
def __UpperCAmelCase ( self ) -> str:
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
def __UpperCAmelCase ( self ) -> List[str]:
UpperCAmelCase_ : int = self.params.max_model_input_size
UpperCAmelCase_ : Optional[int] = self.lengths > max_len
logger.info(f"Splitting {sum(_UpperCamelCase )} too long sequences." )
def divide_chunks(_UpperCamelCase , _UpperCamelCase ):
return [l[i : i + n] for i in range(0 , len(_UpperCamelCase ) , _UpperCamelCase )]
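        # e.g. divide_chunks([1, 2, 3, 4, 5], 2) -> [[1, 2], [3, 4], [5]] (illustrative
        # call); oversized sequences are cut into pieces of at most max_len - 2 tokens so
        # the special tokens can be re-attached below.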
UpperCAmelCase_ : List[Any] = []
UpperCAmelCase_ : Any = []
if self.params.mlm:
UpperCAmelCase_ , UpperCAmelCase_ : int = self.params.special_tok_ids['cls_token'], self.params.special_tok_ids['sep_token']
else:
UpperCAmelCase_ , UpperCAmelCase_ : Any = self.params.special_tok_ids['bos_token'], self.params.special_tok_ids['eos_token']
for seq_, len_ in zip(self.token_ids , self.lengths ):
assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
if len_ <= max_len:
new_tok_ids.append(seq_ )
new_lengths.append(len_ )
else:
UpperCAmelCase_ : int = []
for sub_s in divide_chunks(seq_ , max_len - 2 ):
if sub_s[0] != cls_id:
UpperCAmelCase_ : List[Any] = np.insert(_UpperCamelCase , 0 , _UpperCamelCase )
if sub_s[-1] != sep_id:
UpperCAmelCase_ : List[str] = np.insert(_UpperCamelCase , len(_UpperCamelCase ) , _UpperCamelCase )
assert len(_UpperCamelCase ) <= max_len
assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
sub_seqs.append(_UpperCamelCase )
new_tok_ids.extend(_UpperCamelCase )
new_lengths.extend([len(_UpperCamelCase ) for l in sub_seqs] )
UpperCAmelCase_ : Optional[int] = np.array(_UpperCamelCase )
UpperCAmelCase_ : Tuple = np.array(_UpperCamelCase )
def __UpperCAmelCase ( self ) -> int:
UpperCAmelCase_ : Optional[int] = len(self )
UpperCAmelCase_ : int = self.lengths > 1_1
UpperCAmelCase_ : List[str] = self.token_ids[indices]
UpperCAmelCase_ : Union[str, Any] = self.lengths[indices]
UpperCAmelCase_ : Optional[Any] = len(self )
logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences." )
def __UpperCAmelCase ( self ) -> Dict:
if "unk_token" not in self.params.special_tok_ids:
return
else:
UpperCAmelCase_ : List[Any] = self.params.special_tok_ids['unk_token']
UpperCAmelCase_ : Optional[int] = len(self )
UpperCAmelCase_ : List[str] = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
UpperCAmelCase_ : Optional[int] = (unk_occs / self.lengths) < 0.5
UpperCAmelCase_ : Optional[Any] = self.token_ids[indices]
UpperCAmelCase_ : Optional[int] = self.lengths[indices]
UpperCAmelCase_ : List[str] = len(self )
logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%)." )
def __UpperCAmelCase ( self ) -> List[str]:
if not self.params.is_master:
return
logger.info(f"{len(self )} sequences" )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
def __UpperCAmelCase ( self , _UpperCamelCase ) -> Any:
UpperCAmelCase_ : int = [t[0] for t in batch]
UpperCAmelCase_ : Optional[Any] = [t[1] for t in batch]
assert len(_UpperCamelCase ) == len(_UpperCamelCase )
# Max for paddings
UpperCAmelCase_ : Optional[int] = max(_UpperCamelCase )
# Pad token ids
if self.params.mlm:
UpperCAmelCase_ : List[Any] = self.params.special_tok_ids['pad_token']
else:
UpperCAmelCase_ : Union[str, Any] = self.params.special_tok_ids['unk_token']
UpperCAmelCase_ : Optional[Any] = [list(t.astype(_UpperCamelCase ) ) + [pad_idx] * (max_seq_len_ - len(_UpperCamelCase )) for t in token_ids]
assert len(tk_ ) == len(_UpperCamelCase )
assert all(len(_UpperCamelCase ) == max_seq_len_ for t in tk_ )
UpperCAmelCase_ : int = torch.tensor(tk_ ) # (bs, max_seq_len_)
UpperCAmelCase_ : Dict = torch.tensor(_UpperCamelCase ) # (bs)
return tk_t, lg_t
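
# A minimal usage sketch (assumption: this mirrors the distillation example's
# training loop; the `LmSeqsDataset(params=..., data=...)` constructor named in
# the comment below is not part of this fragment and is assumed).
def _example_dataloader(dataset):
    from torch.utils.data import DataLoader

    # `batch_sequences` pads each batch to its own max length, so it plugs
    # straight into a DataLoader as the collate function.
    loader = DataLoader(dataset, batch_size=32, shuffle=True, collate_fn=dataset.batch_sequences)
    for token_ids, lengths in loader:  # token_ids: (bs, max_seq_len_), lengths: (bs,)
        pass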
| 406
|
from __future__ import annotations

from math import isqrt


def calculate_prime_numbers(max_number: int) -> list[int]:
    """
    Returns the prime numbers below max_number (sieve of Eratosthenes).
    """
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(max_number: int = 10**8) -> int:
    """
    Counts the composite integers below max_number that have exactly two (not
    necessarily distinct) prime factors, using a two-pointer sweep over the
    primes below max_number // 2.
    """
    prime_numbers = calculate_prime_numbers(max_number // 2)

    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1

    return semiprimes_count


if __name__ == "__main__":
    print(f"{solution() = }")
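
# Quick sanity check (my own example, not part of the original solution): the
# semiprimes below 30 are 4, 6, 9, 10, 14, 15, 21, 22, 25 and 26, ten in total.
def _check_small_case() -> None:
    assert solution(30) == 10


if __name__ == "__main__":
    _check_small_case()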
| 406
| 1
|
"""simple docstring"""
from __future__ import annotations
def _lowerCamelCase ( lowerCamelCase__ : float , lowerCamelCase__ : float , lowerCamelCase__ : float , ):
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError("""You cannot supply more or less than 2 values""" )
elif stress < 0:
raise ValueError("""Stress cannot be negative""" )
elif tangential_force < 0:
raise ValueError("""Tangential Force cannot be negative""" )
elif area < 0:
raise ValueError("""Area cannot be negative""" )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
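
# Illustration (my own example): solving for the missing area from a known
# stress and tangential force, since area = tangential_force / stress.
def _example() -> None:
    quantity, value = shear_stress(stress=25, tangential_force=100, area=0)
    assert (quantity, value) == ("area", 4.0)  # 100 N / 25 Pa = 4 m^2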
| 128
|
"""simple docstring"""
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
__snake_case = logging.get_logger(__name__)
@add_end_docstrings(__UpperCAmelCase )
class _SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
"""simple docstring"""
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> List[str]:
super().__init__(*lowerCamelCase__ , **lowerCamelCase__ )
requires_backends(self , """decord""" )
self.check_model_type(lowerCamelCase__ )
def UpperCAmelCase__( self , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None ) -> Optional[int]:
lowercase__ : int = {}
if frame_sampling_rate is not None:
lowercase__ : Optional[Any] = frame_sampling_rate
if num_frames is not None:
lowercase__ : List[str] = num_frames
lowercase__ : str = {}
if top_k is not None:
lowercase__ : Any = top_k
return preprocess_params, {}, postprocess_params
def __call__( self , lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]:
return super().__call__(lowerCamelCase__ , **lowerCamelCase__ )
def UpperCAmelCase__( self , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=1 ) -> Optional[Any]:
if num_frames is None:
lowercase__ : Dict = self.model.config.num_frames
if video.startswith("""http://""" ) or video.startswith("""https://""" ):
lowercase__ : Tuple = BytesIO(requests.get(lowerCamelCase__ ).content )
lowercase__ : int = VideoReader(lowerCamelCase__ )
videoreader.seek(0 )
lowercase__ : Optional[int] = 0
lowercase__ : Optional[int] = num_frames * frame_sampling_rate - 1
lowercase__ : Optional[Any] = np.linspace(lowerCamelCase__ , lowerCamelCase__ , num=lowerCamelCase__ , dtype=np.intaa )
lowercase__ : Union[str, Any] = videoreader.get_batch(lowerCamelCase__ ).asnumpy()
lowercase__ : Optional[int] = list(lowerCamelCase__ )
lowercase__ : Optional[int] = self.image_processor(lowerCamelCase__ , return_tensors=self.framework )
return model_inputs
def UpperCAmelCase__( self , lowerCamelCase__ ) -> Optional[Any]:
lowercase__ : int = self.model(**lowerCamelCase__ )
return model_outputs
def UpperCAmelCase__( self , lowerCamelCase__ , lowerCamelCase__=5 ) -> Any:
if top_k > self.model.config.num_labels:
lowercase__ : int = self.model.config.num_labels
if self.framework == "pt":
lowercase__ : List[Any] = model_outputs.logits.softmax(-1 )[0]
lowercase__ , lowercase__ : int = probs.topk(lowerCamelCase__ )
else:
raise ValueError(F'''Unsupported framework: {self.framework}''' )
lowercase__ : str = scores.tolist()
lowercase__ : List[str] = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(lowerCamelCase__ , lowerCamelCase__ )]
| 128
| 1
|
import unittest

from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
    from transformers.models.esm.modeling_esm import (
        ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
        EsmEmbeddings,
        create_position_ids_from_input_ids,
    )


class EsmModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=33,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = EsmForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = EsmForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class EsmModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False

    all_model_classes = (
        (
            EsmForMaskedLM,
            EsmModel,
            EsmForSequenceClassification,
            EsmForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = ()
    pipeline_model_mapping = (
        {
            "feature-extraction": EsmModel,
            "fill-mask": EsmForMaskedLM,
            "text-classification": EsmForSequenceClassification,
            "token-classification": EsmForTokenClassification,
            "zero-shot": EsmForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_sequence_classification_problem_types = True

    def setUp(self):
        self.model_tester = EsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = EsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_create_position_ids_respects_padding_index(self):
        """The position ids should be masked with the embedding object's padding index."""
        config = self.model_tester.prepare_config_and_inputs()[0]
        model = EsmEmbeddings(config=config)

        input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]])
        expected_positions = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ]
        )

        position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    def test_create_position_ids_from_inputs_embeds(self):
        """The position ids should be masked with the embedding object's padding index."""
        config = self.model_tester.prepare_config_and_inputs()[0]
        embeddings = EsmEmbeddings(config=config)

        inputs_embeds = torch.empty(2, 4, 30)
        expected_single_positions = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions])
        position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_masked_lm(self):
        with torch.no_grad():
            model = EsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
            output = model(input_ids)[0]

            vocab_size = 33
            expected_shape = torch.Size((1, 6, vocab_size))
            self.assertEqual(output.shape, expected_shape)

            expected_slice = torch.tensor(
                [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        with torch.no_grad():
            model = EsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
            output = model(input_ids)[0]
            # compare the actual values for a slice.
            expected_slice = torch.tensor(
                [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
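
# For intuition, the padding-aware position ids asserted above can be computed
# directly (standalone sketch; `padding_idx = 1` is an assumption here):
def _position_id_sketch():
    padding_idx = 1
    input_ids = torch.tensor([[12, 31, 13, padding_idx]])
    mask = input_ids.ne(padding_idx).int()
    position_ids = torch.cumsum(mask, dim=1) * mask + padding_idx
    return position_ids  # tensor([[2, 3, 4, 1]]): padding positions keep padding_idx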
| 202
|
import gc
import unittest

import numpy as np
import torch
from torch.backends.cuda import sdp_kernel

from diffusers import (
    CMStochasticIterativeScheduler,
    ConsistencyModelPipeline,
    UNet2DModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_2, require_torch_gpu

from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class ConsistencyModelPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS

    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    @property
    def dummy_uncond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet",
        )
        return unet

    @property
    def dummy_cond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet_class_cond",
        )
        return unet

    def get_dummy_components(self, class_cond=False):
        if class_cond:
            unet = self.dummy_cond_unet
        else:
            unet = self.dummy_uncond_unet

        # Default to CM multistep sampler
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "batch_size": 1,
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "generator": generator,
            "output_type": "np",
        }

        return inputs
    def test_consistency_model_pipeline_multistep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_multistep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@slow
@require_torch_gpu
class ConsistencyModelPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0, get_fixed_latents=False, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        generator = torch.manual_seed(seed)

        inputs = {
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "class_labels": 0,
            "generator": generator,
            "output_type": "np",
        }

        if get_fixed_latents:
            latents = self.get_fixed_latents(seed=seed, device=device, dtype=dtype, shape=shape)
            inputs["latents"] = latents

        return inputs

    def get_fixed_latents(self, seed=0, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        if type(device) == str:
            device = torch.device(device)
        generator = torch.Generator(device=device).manual_seed(seed)
        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        return latents

    def test_consistency_model_cd_multistep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]

        expected_slice = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_consistency_model_cd_onestep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]

        expected_slice = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    @require_torch_2
    def test_consistency_model_cd_multistep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)

        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]

        expected_slice = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    @require_torch_2
    def test_consistency_model_cd_onestep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None

        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]

        expected_slice = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
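
# Usage sketch outside the test harness (assumption: the hub id below is a
# guess at the public consistency-model checkpoint; one-step sampling shown).
def _example_one_step_generation():
    pipe = ConsistencyModelPipeline.from_pretrained("openai/diffusers-cd_imagenet64_l2", torch_dtype=torch.float16)
    pipe.to("cuda")
    return pipe(num_inference_steps=1, class_labels=0).images[0]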
| 202
| 1
|
import os
import unittest

from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer

from ...test_tokenization_common import TokenizerTesterMixin


class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "[CLS]",
            "[SEP]",
            "want",
            "unwanted",
            "wa",
            "un",
            "running",
            ",",
            "low",
            "l",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        kwargs["lower_case"] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "<unk> UNwanted , running"
        output_text = "<unk> unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)

        tokens = tokenizer.tokenize("<unk> UNwanted , running")
        self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])

    def test_full_tokenizer_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["hello", "!", "how", "are", "you", "?"]
        )

    def test_full_tokenizer_no_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_full_tokenizer_moses_numbers(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        tokens_out = [
            "Hello",
            "(",
            "bracket",
            ")",
            "and",
            "side",
            "@-@",
            "scrolled",
            "[",
            "and",
            "]",
            "Henry",
            "'s",
            "$",
            "5",
            "@,@",
            "000",
            "with",
            "3",
            "@.@",
            "34",
            "m",
            ".",
            "What",
            "'s",
            "up",
            "!",
            "?",
        ]

        self.assertListEqual(tokenizer.tokenize(text_in), tokens_out)

        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in)

    def test_move_added_token(self):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)

        tokenizer.add_tokens(["new1", "new2"])
        tokenizer.move_added_token("new1", 1)

        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer), original_len + 2)
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("new1"), [1])
        self.assertEqual(tokenizer.decode([1]), "new1")
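
# The "@-@", "@,@" and "@.@" tokens above are Moses-style placeholders that let
# detokenization restore hyphens and digit separators exactly. A rough
# approximation of that collapsing step (my own sketch, not the library code):
def _detokenize_numbers_sketch(text):
    import re

    # e.g. _detokenize_numbers_sketch("$ 5 @,@ 000 with 3 @.@ 34 m") == "$ 5,000 with 3.34 m"
    return re.sub(r" @([,.\-])@ ", r"\1", text)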
| 692
|
""" PyTorch PoolFormer model."""


import collections.abc
from typing import Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig


logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "PoolFormerConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "sail/poolformer_s12"
_EXPECTED_OUTPUT_SHAPE = [1, 512, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "sail/poolformer_s12"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "sail/poolformer_s12",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]


def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    """
    Drop paths (Stochastic Depth) per sample (when applied in the main path of residual blocks).
    """
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output
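
# Small standalone illustration (my own example): during training, each sample
# in the batch is either zeroed or rescaled by 1 / keep_prob, so the output
# matches the input in expectation.
def _drop_path_sketch():
    torch.manual_seed(0)
    x = torch.ones(4, 2, 2, 2)  # batch of 4 samples
    out = drop_path(x, drop_prob=0.5, training=True)
    return out.flatten(1)[:, 0]  # per-sample factor: each entry is 0.0 or 2.0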
class PoolFormerDropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in the main path of residual blocks)."""

    def __init__(self, drop_prob: Optional[float] = None) -> None:
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        return "p={}".format(self.drop_prob)


class PoolFormerEmbeddings(nn.Module):
    """
    Construct Patch Embeddings.
    """

    def __init__(self, hidden_size, num_channels, patch_size, stride, padding, norm_layer=None):
        super().__init__()
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        stride = stride if isinstance(stride, collections.abc.Iterable) else (stride, stride)
        padding = padding if isinstance(padding, collections.abc.Iterable) else (padding, padding)

        self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=stride, padding=padding)
        self.norm = norm_layer(hidden_size) if norm_layer else nn.Identity()

    def forward(self, pixel_values):
        embeddings = self.projection(pixel_values)
        embeddings = self.norm(embeddings)
        return embeddings


class PoolFormerGroupNorm(nn.GroupNorm):
    """
    Group Normalization with 1 group.
    """

    def __init__(self, num_channels, **kwargs):
        super().__init__(1, num_channels, **kwargs)


class PoolFormerPooling(nn.Module):
    def __init__(self, pool_size):
        super().__init__()
        self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)

    def forward(self, hidden_states):
        # average-pooled features minus the input act as the token mixer
        return self.pool(hidden_states) - hidden_states
class PoolFormerOutput(nn.Module):
    def __init__(self, config, dropout_prob, hidden_size, intermediate_size):
        super().__init__()
        self.conv1 = nn.Conv2d(hidden_size, intermediate_size, 1)
        self.conv2 = nn.Conv2d(intermediate_size, hidden_size, 1)
        self.drop = PoolFormerDropPath(dropout_prob)
        if isinstance(config.hidden_act, str):
            self.act_fn = ACT2FN[config.hidden_act]
        else:
            self.act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.conv1(hidden_states)
        hidden_states = self.act_fn(hidden_states)
        hidden_states = self.drop(hidden_states)
        hidden_states = self.conv2(hidden_states)
        hidden_states = self.drop(hidden_states)

        return hidden_states


class PoolFormerLayer(nn.Module):
    """This corresponds to the 'PoolFormerBlock' class in the original implementation."""

    def __init__(self, config, num_channels, pool_size, hidden_size, intermediate_size, drop_path):
        super().__init__()
        self.pooling = PoolFormerPooling(pool_size)
        self.output = PoolFormerOutput(config, drop_path, hidden_size, intermediate_size)
        self.before_norm = PoolFormerGroupNorm(num_channels)
        self.after_norm = PoolFormerGroupNorm(num_channels)

        # Useful for training neural nets
        self.drop_path = PoolFormerDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.use_layer_scale = config.use_layer_scale
        if config.use_layer_scale:
            self.layer_scale_1 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )
            self.layer_scale_2 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )

    def forward(self, hidden_states):
        if self.use_layer_scale:
            pooling_output = self.pooling(self.before_norm(hidden_states))
            scaled_op = self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * pooling_output
            # First residual connection
            hidden_states = hidden_states + self.drop_path(scaled_op)
            outputs = ()

            layer_output = self.output(self.after_norm(hidden_states))
            scaled_op = self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * layer_output
            # Second residual connection
            output = hidden_states + self.drop_path(scaled_op)

            outputs = (output,) + outputs
            return outputs

        else:
            pooling_output = self.drop_path(self.pooling(self.before_norm(hidden_states)))
            # First residual connection
            hidden_states = pooling_output + hidden_states
            outputs = ()

            # Second residual connection inside the PoolFormerOutput block
            layer_output = self.drop_path(self.output(self.after_norm(hidden_states)))
            output = hidden_states + layer_output

            outputs = (output,) + outputs
            return outputs
class PoolFormerEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        # stochastic depth decay rule
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]

        # patch embeddings
        embeddings = []
        for i in range(config.num_encoder_blocks):
            embeddings.append(
                PoolFormerEmbeddings(
                    patch_size=config.patch_sizes[i],
                    stride=config.strides[i],
                    padding=config.padding[i],
                    num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1],
                    hidden_size=config.hidden_sizes[i],
                )
            )
        self.patch_embeddings = nn.ModuleList(embeddings)

        # Transformer blocks
        blocks = []
        cur = 0
        for i in range(config.num_encoder_blocks):
            # each block consists of layers
            layers = []
            if i != 0:
                cur += config.depths[i - 1]
            for j in range(config.depths[i]):
                layers.append(
                    PoolFormerLayer(
                        config,
                        num_channels=config.hidden_sizes[i],
                        pool_size=config.pool_size,
                        hidden_size=config.hidden_sizes[i],
                        intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio),
                        drop_path=dpr[cur + j],
                    )
                )
            blocks.append(nn.ModuleList(layers))

        self.block = nn.ModuleList(blocks)

    def forward(self, pixel_values, output_hidden_states=False, return_dict=True):
        all_hidden_states = () if output_hidden_states else None

        hidden_states = pixel_values
        for idx, layers in enumerate(zip(self.patch_embeddings, self.block)):
            embedding_layer, block_layer = layers
            # Get patch embeddings from hidden_states
            hidden_states = embedding_layer(hidden_states)
            # Send the embeddings through the blocks
            for _, blk in enumerate(block_layer):
                layer_outputs = blk(hidden_states)
                hidden_states = layer_outputs[0]

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)
class PoolFormerPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = PoolFormerConfig
    base_model_prefix = "poolformer"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, PoolFormerEncoder):
            module.gradient_checkpointing = value
__UpperCAmelCase = R"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
__UpperCAmelCase = R"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`PoolFormerImageProcessor.__call__`] for details.\n"
@add_start_docstrings(
    "The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.",
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerModel(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.encoder = PoolFormerEncoder(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.patch_embeddings

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        encoder_outputs = self.encoder(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]

        if not return_dict:
            return (sequence_output, None) + encoder_outputs[1:]

        return BaseModelOutputWithNoAttention(
            last_hidden_state=sequence_output,
            hidden_states=encoder_outputs.hidden_states,
        )


class PoolFormerFinalPooler(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)

    def forward(self, hidden_states):
        output = self.dense(hidden_states)
        return output
@add_start_docstrings(
    """
    PoolFormer Model transformer with an image classification head on top
    """,
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerForImageClassification(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.poolformer = PoolFormerModel(config)

        # Final norm
        self.norm = PoolFormerGroupNorm(config.hidden_sizes[-1])

        # Classifier head
        self.classifier = (
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
        )

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.poolformer(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        logits = self.classifier(self.norm(sequence_output).mean([-2, -1]))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
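
# Inference sketch (the checkpoint id comes from the docstrings above; `image`
# is assumed to be a PIL image you have already loaded):
def _example_classification(image):
    from transformers import AutoImageProcessor

    processor = AutoImageProcessor.from_pretrained("sail/poolformer_s12")
    model = PoolFormerForImageClassification.from_pretrained("sail/poolformer_s12")
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    return model.config.id2label[logits.argmax(-1).item()]  # e.g. "tabby, tabby cat"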
| 692
| 1
|
import os
import unittest

from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer

from ...test_tokenization_common import TokenizerTesterMixin


class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "[CLS]",
            "[SEP]",
            "want",
            "unwanted",
            "wa",
            "un",
            "running",
            ",",
            "low",
            "l",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        kwargs["lower_case"] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "<unk> UNwanted , running"
        output_text = "<unk> unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)

        tokens = tokenizer.tokenize("<unk> UNwanted , running")
        self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])

    def test_full_tokenizer_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["hello", "!", "how", "are", "you", "?"]
        )

    def test_full_tokenizer_no_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_full_tokenizer_moses_numbers(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        tokens_out = [
            "Hello",
            "(",
            "bracket",
            ")",
            "and",
            "side",
            "@-@",
            "scrolled",
            "[",
            "and",
            "]",
            "Henry",
            "'s",
            "$",
            "5",
            "@,@",
            "000",
            "with",
            "3",
            "@.@",
            "34",
            "m",
            ".",
            "What",
            "'s",
            "up",
            "!",
            "?",
        ]

        self.assertListEqual(tokenizer.tokenize(text_in), tokens_out)

        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in)

    def test_move_added_token(self):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)

        tokenizer.add_tokens(["new1", "new2"])
        tokenizer.move_added_token("new1", 1)

        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer), original_len + 2)
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("new1"), [1])
        self.assertEqual(tokenizer.decode([1]), "new1")
| 587
|
import argparse

import torch

from transformers import (
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForAudioFrameClassification,
    Wav2Vec2ForSequenceClassification,
    Wav2Vec2ForXVector,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_classification(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    """
    Copy/paste/tweak the s3prl checkpoint's weights to the transformers design.
    """
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    downstream_dict = checkpoint["Downstream"]

    hf_config = Wav2Vec2Config.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
    )
    parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
    parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
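
# Programmatic invocation sketch (paths and the model name are placeholders):
def _example_conversion():
    convert_s3prl_checkpoint(
        base_model_name="facebook/wav2vec2-base",
        config_path="./config.json",
        checkpoint_path="./s3prl_checkpoint.ckpt",
        model_dump_path="./converted_model",
    )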
| 587
| 1
|
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class snake_case_( unittest.TestCase ):
def lowerCamelCase__ ( self : Optional[Any] ):
lowerCAmelCase : Any = tf.convert_to_tensor(
[
[
8.2_220_991, # 3rd highest value; idx. 0
-0.5_620_044,
5.23_229_752,
4.0_386_393,
-6.8_798_378,
-0.54_785_802,
-3.2_012_153,
2.92_777_176,
1.88_171_953,
7.35_341_276, # 5th highest value; idx. 9
8.43_207_833, # 2nd highest value; idx. 10
-9.85_711_836,
-5.96_209_236,
-1.13_039_161,
-7.1_115_294,
-0.8_369_633,
-5.3_186_408,
7.06_427_407,
0.81_369_344,
-0.82_023_817,
-5.9_179_796,
0.58_813_443,
-6.99_778_438,
4.71_551_189,
-0.18_771_637,
7.44_020_759, # 4th highest value; idx. 25
9.38_450_987, # 1st highest value; idx. 26
2.12_662_941,
-9.32_562_038,
2.35_652_522,
], # cummulative prob of 5 highest values <= 0.6
[
0.58_425_518,
4.53_139_238,
-5.57_510_464,
-6.28_030_699,
-7.19_529_503,
-4.02_122_551,
1.39_337_037,
-6.06_707_057,
1.59_480_517,
-9.643_119,
0.03_907_799,
0.67_231_762,
-8.88_206_726,
6.27_115_922, # 4th highest value; idx. 13
2.28_520_723,
4.82_767_506,
4.30_421_368,
8.8_275_313, # 2nd highest value; idx. 17
5.44_029_958, # 5th highest value; idx. 18
-4.4_735_794,
7.38_579_536, # 3rd highest value; idx. 20
-2.91_051_663,
2.61_946_077,
-2.5_674_762,
-9.48_959_302,
-4.02_922_645,
-1.35_416_918,
9.67_702_323, # 1st highest value; idx. 27
-5.89_478_553,
1.85_370_467,
], # cummulative prob of 5 highest values <= 0.6
] , dtype=tf.floataa , )
lowerCAmelCase : int = tf.convert_to_tensor(
[[0, 0], [0, 9], [0, 1_0], [0, 2_5], [0, 2_6], [1, 1_3], [1, 1_7], [1, 1_8], [1, 2_0], [1, 2_7]] , dtype=tf.intaa , ) # expected non filtered idx as noted above
lowerCAmelCase : Union[str, Any] = tf.convert_to_tensor(
[8.222_099, 7.3_534_126, 8.432_078, 7.4_402_075, 9.38_451, 6.271_159, 8.827_531, 5.4_402_995, 7.3_857_956, 9.677_023] , dtype=tf.floataa , ) # expected non filtered values as noted above
lowerCAmelCase : Optional[int] = tf_top_k_top_p_filtering(UpperCamelCase_ , top_k=1_0 , top_p=0.6 , min_tokens_to_keep=4 )
lowerCAmelCase : Union[str, Any] = output[output != -float('''inf''' )]
lowerCAmelCase : Union[str, Any] = tf.cast(
tf.where(tf.not_equal(UpperCamelCase_ , tf.constant(-float('''inf''' ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , )
tf.debugging.assert_near(UpperCamelCase_ , UpperCamelCase_ , rtol=1E-12 )
tf.debugging.assert_equal(UpperCamelCase_ , UpperCamelCase_ )
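# Reader's note (not part of the original test): tf_top_k_top_p_filtering masks
# to -inf every logit that is outside the `top_k` largest values and outside
# the smallest nucleus of tokens whose cumulative probability reaches `top_p`;
# `min_tokens_to_keep=4` guarantees at least four logits survive per row, which
# is why the rows above annotate their four to five highest values.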
@require_tf
class snake_case_( unittest.TestCase , a__ ):
# setting framework_dependent_parameters needs to be gated, just like its contents' imports
if is_tf_available():
__UpperCamelCase = {
'''AutoModelForCausalLM''': TFAutoModelForCausalLM,
'''AutoModelForSpeechSeq2Seq''': TFAutoModelForSpeechSeqaSeq,
'''AutoModelForSeq2SeqLM''': TFAutoModelForSeqaSeqLM,
'''AutoModelForVision2Seq''': TFAutoModelForVisionaSeq,
'''LogitsProcessorList''': TFLogitsProcessorList,
'''MinLengthLogitsProcessor''': TFMinLengthLogitsProcessor,
'''create_tensor_fn''': tf.convert_to_tensor,
'''floats_tensor''': floats_tensor,
'''return_tensors''': '''tf''',
}
@slow
def lowerCamelCase__ ( self : Any ):
# TF-only test: tf.saved_model export
lowerCAmelCase : Any = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCAmelCase : Optional[Any] = 2
lowerCAmelCase : Any = 2
class snake_case_( tf.Module ):
def __init__( self : Any , UpperCamelCase_ : Optional[Any] ):
super(UpperCamelCase_ , self ).__init__()
lowerCAmelCase : List[str] = model
@tf.function(
input_signature=(
tf.TensorSpec((None, input_length) , tf.intaa , name='''input_ids''' ),
tf.TensorSpec((None, input_length) , tf.intaa , name='''attention_mask''' ),
) , jit_compile=UpperCamelCase_ , )
def lowerCamelCase__ ( self : str , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Optional[Any] ):
lowerCAmelCase : List[Any] = self.model.generate(
input_ids=UpperCamelCase_ , attention_mask=UpperCamelCase_ , max_new_tokens=UpperCamelCase_ , return_dict_in_generate=UpperCamelCase_ , )
return {"sequences": outputs["sequences"]}
lowerCAmelCase : Tuple = [[2, 0], [1_0_2, 1_0_3]]
lowerCAmelCase : Dict = [[1, 0], [1, 1]]
lowerCAmelCase : str = DummyModel(model=UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(UpperCamelCase_ , UpperCamelCase_ , signatures={'''serving_default''': dummy_model.serving} )
lowerCAmelCase : Union[str, Any] = tf.saved_model.load(UpperCamelCase_ ).signatures['''serving_default''']
for batch_size in range(1 , len(UpperCamelCase_ ) + 1 ):
lowerCAmelCase : List[str] = {
'''input_ids''': tf.constant(dummy_input_ids[:batch_size] ),
'''attention_mask''': tf.constant(dummy_attention_masks[:batch_size] ),
}
lowerCAmelCase : Dict = serving_func(**UpperCamelCase_ )['''sequences''']
lowerCAmelCase : Tuple = test_model.generate(**UpperCamelCase_ , max_new_tokens=UpperCamelCase_ )
tf.debugging.assert_equal(UpperCamelCase_ , UpperCamelCase_ )
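    # Reader's note: wrapping `generate` in a tf.Module method decorated with
    # @tf.function(input_signature=...) is what makes the generation loop
    # exportable as a SavedModel with a stable serving signature; the test below
    # repeats the recipe with a fixed batch size and a dynamic sequence length.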
@slow
def lowerCamelCase__ ( self : Any ):
# TF-only test: tf.saved_model export
lowerCAmelCase : str = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCAmelCase : List[Any] = 1
lowerCAmelCase : Dict = 2
class snake_case_( tf.Module ):
def __init__( self : Dict , UpperCamelCase_ : str ):
super(UpperCamelCase_ , self ).__init__()
lowerCAmelCase : int = model
@tf.function(
input_signature=(
tf.TensorSpec((batch_size, None) , tf.intaa , name='''input_ids''' ),
tf.TensorSpec((batch_size, None) , tf.intaa , name='''attention_mask''' ),
) , jit_compile=UpperCamelCase_ , )
def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : List[str] ):
lowerCAmelCase : List[Any] = self.model.generate(
input_ids=UpperCamelCase_ , attention_mask=UpperCamelCase_ , max_new_tokens=UpperCamelCase_ , return_dict_in_generate=UpperCamelCase_ , )
return {"sequences": outputs["sequences"]}
lowerCAmelCase : Optional[int] = [[2], [1_0_2, 1_0_3]]
lowerCAmelCase : Tuple = [[1], [1, 1]]
lowerCAmelCase : Optional[int] = DummyModel(model=UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(UpperCamelCase_ , UpperCamelCase_ , signatures={'''serving_default''': dummy_model.serving} )
lowerCAmelCase : List[Any] = tf.saved_model.load(UpperCamelCase_ ).signatures['''serving_default''']
for input_row in range(len(UpperCamelCase_ ) ):
lowerCAmelCase : Optional[Any] = {
'''input_ids''': tf.constant([dummy_input_ids[input_row]] ),
'''attention_mask''': tf.constant([dummy_attention_masks[input_row]] ),
}
lowerCAmelCase : Union[str, Any] = serving_func(**UpperCamelCase_ )['''sequences''']
lowerCAmelCase : Optional[Any] = test_model.generate(**UpperCamelCase_ , max_new_tokens=UpperCamelCase_ )
tf.debugging.assert_equal(UpperCamelCase_ , UpperCamelCase_ )
@slow
@require_tensorflow_text
def lowerCamelCase__ ( self : int ):
# TF-only test: tf.saved_model export
with tempfile.TemporaryDirectory() as tmp_dir:
# file needed to load the TF tokenizer
hf_hub_download(repo_id='''google/flan-t5-small''' , filename='''spiece.model''' , local_dir=UpperCamelCase_ )
class snake_case_( tf.keras.layers.Layer ):
def __init__( self : Any ):
super().__init__()
lowerCAmelCase : Union[str, Any] = text.SentencepieceTokenizer(
model=tf.io.gfile.GFile(os.path.join(UpperCamelCase_ , '''spiece.model''' ) , '''rb''' ).read() )
lowerCAmelCase : Optional[Any] = TFAutoModelForSeqaSeqLM.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : Dict , *UpperCamelCase_ : Optional[Any] , **UpperCamelCase_ : List[Any] ):
lowerCAmelCase : str = self.tokenizer.tokenize(UpperCamelCase_ )
lowerCAmelCase, lowerCAmelCase : Dict = text.pad_model_inputs(
UpperCamelCase_ , max_seq_length=6_4 , pad_value=self.model.config.pad_token_id )
lowerCAmelCase : Dict = self.model.generate(input_ids=UpperCamelCase_ , attention_mask=UpperCamelCase_ )
return self.tokenizer.detokenize(UpperCamelCase_ )
lowerCAmelCase : Dict = CompleteSentenceTransformer()
lowerCAmelCase : List[Any] = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name='''inputs''' )
lowerCAmelCase : Dict = complete_model(UpperCamelCase_ )
lowerCAmelCase : int = tf.keras.Model(UpperCamelCase_ , UpperCamelCase_ )
keras_model.save(UpperCamelCase_ )
def lowerCamelCase__ ( self : List[Any] ):
# Has PT equivalent: this test relies on random sampling
lowerCAmelCase : Dict = {
'''do_sample''': True,
'''num_beams''': 1,
'''top_p''': 0.7,
'''top_k''': 1_0,
'''temperature''': 0.7,
}
lowerCAmelCase : int = 1_4
lowerCAmelCase : Any = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCAmelCase : Union[str, Any] = '''Hello, my dog is cute and'''
lowerCAmelCase : List[Any] = tokenizer(UpperCamelCase_ , return_tensors='''tf''' )
lowerCAmelCase : List[Any] = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCAmelCase : Union[str, Any] = 6_3_8
# forces the generation to happen on CPU, to avoid GPU-related quirks
with tf.device(''':/CPU:0''' ):
tf.random.set_seed(0 )
lowerCAmelCase : Any = model.generate(**UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ )
self.assertTrue(expectation == len(generated_tokens[0] ) )
lowerCAmelCase : int = [6_3_8, 1_9_8]
with tf.device(''':/CPU:0''' ):
tf.random.set_seed(0 )
lowerCAmelCase : str = model.generate(**UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ )
self.assertTrue(expectation == len(generated_tokens[0] ) )
def lowerCamelCase__ ( self : Dict ):
# Has PT equivalent: ample use of framework-specific code
lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
lowerCAmelCase : Dict = '''Hugging Face is a technology company based in New York and Paris.'''
lowerCAmelCase : Tuple = bart_tokenizer(UpperCamelCase_ , return_tensors='''tf''' ).input_ids
lowerCAmelCase : Union[str, Any] = TFBartForConditionalGeneration.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
lowerCAmelCase : Optional[int] = bart_model.generate(UpperCamelCase_ ).numpy()
class snake_case_( a__ ):
def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Union[str, Any]=None , **UpperCamelCase_ : Dict ):
return super().call(UpperCamelCase_ , **UpperCamelCase_ )
lowerCAmelCase : Any = FakeBart.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
lowerCAmelCase : Tuple = bart_model.generate(UpperCamelCase_ , foo='''bar''' ).numpy()
self.assertTrue(np.array_equal(UpperCamelCase_ , UpperCamelCase_ ) )
class snake_case_( bart_model.model.encoder.__class__ ):
def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : Union[str, Any] , **UpperCamelCase_ : int ):
return super().call(UpperCamelCase_ , **UpperCamelCase_ )
lowerCAmelCase : str = FakeEncoder(bart_model.config , bart_model.model.shared )
lowerCAmelCase : Union[str, Any] = fake_encoder
# Normal generation still works (the output will be different because the encoder weights are different)
lowerCAmelCase : Optional[Any] = bart_model.generate(UpperCamelCase_ ).numpy()
with self.assertRaises(UpperCamelCase_ ):
# FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
bart_model.generate(UpperCamelCase_ , foo='''bar''' )
| 637
|
"""simple docstring"""
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class snake_case_( unittest.TestCase ):
def lowerCamelCase__ ( self : int , UpperCamelCase_ : int ):
lowerCAmelCase : str = 3
lowerCAmelCase : Tuple = 2_5_0
lowerCAmelCase : Optional[Any] = ids_tensor((batch_size, length) , UpperCamelCase_ )
lowerCAmelCase : Optional[Any] = torch.ones((batch_size, length) , device=UpperCamelCase_ , dtype=torch.float ) / length
return input_ids, scores
def lowerCamelCase__ ( self : Optional[Any] ):
lowerCAmelCase, lowerCAmelCase : Optional[int] = self._get_tensors(5 )
lowerCAmelCase : Union[str, Any] = StoppingCriteriaList(
[
MaxLengthCriteria(max_length=1_0 ),
MaxTimeCriteria(max_time=0.1 ),
] )
self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
lowerCAmelCase, lowerCAmelCase : List[str] = self._get_tensors(9 )
self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
lowerCAmelCase, lowerCAmelCase : Any = self._get_tensors(1_0 )
self.assertTrue(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
def lowerCamelCase__ ( self : Optional[Any] ):
lowerCAmelCase : Optional[Any] = MaxLengthCriteria(max_length=1_0 )
lowerCAmelCase, lowerCAmelCase : Optional[Any] = self._get_tensors(5 )
self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
lowerCAmelCase, lowerCAmelCase : List[str] = self._get_tensors(9 )
self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
lowerCAmelCase, lowerCAmelCase : str = self._get_tensors(1_0 )
self.assertTrue(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
def lowerCamelCase__ ( self : List[Any] ):
lowerCAmelCase : Optional[Any] = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
lowerCAmelCase, lowerCAmelCase : Optional[int] = self._get_tensors(5 )
self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
lowerCAmelCase, lowerCAmelCase : Union[str, Any] = self._get_tensors(9 )
self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
lowerCAmelCase, lowerCAmelCase : str = self._get_tensors(1_0 )
self.assertTrue(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
lowerCAmelCase : Dict = StoppingCriteriaList([criteria] )
self.assertEqual(criteria_list.max_length , 1_0 )
def lowerCamelCase__ ( self : Union[str, Any] ):
lowerCAmelCase, lowerCAmelCase : Tuple = self._get_tensors(5 )
lowerCAmelCase : List[str] = MaxTimeCriteria(max_time=0.1 )
self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
lowerCAmelCase : List[str] = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
self.assertTrue(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
def lowerCamelCase__ ( self : str ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(1_0 )] ) , 1_0 )
with self.assertWarns(UpperCamelCase_ ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(1_0 )] ) , 1_1 )
lowerCAmelCase : str = validate_stopping_criteria(StoppingCriteriaList() , 1_1 )
self.assertEqual(len(UpperCamelCase_ ) , 1 )
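# Minimal usage sketch (illustrative, not part of the test file): the criteria
# compose with generation roughly as
#
#   criteria = StoppingCriteriaList(
#       [MaxLengthCriteria(max_length=20), MaxTimeCriteria(max_time=5.0)]
#   )
#   model.generate(input_ids, stopping_criteria=criteria)
#
# Generation halts as soon as any criterion in the list returns True.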
| 637
| 1
|
'''simple docstring'''
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
UpperCamelCase_ = logging.get_logger("transformers.models.speecht5")
def lowercase__( __UpperCamelCase: List[Any] ,__UpperCamelCase: List[Any] ,__UpperCamelCase: Any ):
"""simple docstring"""
hf_model.apply_weight_norm()
SCREAMING_SNAKE_CASE : Any = checkpoint['input_conv.weight_g']
SCREAMING_SNAKE_CASE : List[Any] = checkpoint['input_conv.weight_v']
SCREAMING_SNAKE_CASE : str = checkpoint['input_conv.bias']
for i in range(len(config.upsample_rates ) ):
SCREAMING_SNAKE_CASE : Optional[int] = checkpoint[f"upsamples.{i}.1.weight_g"]
SCREAMING_SNAKE_CASE : Dict = checkpoint[f"upsamples.{i}.1.weight_v"]
SCREAMING_SNAKE_CASE : Union[str, Any] = checkpoint[f"upsamples.{i}.1.bias"]
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
SCREAMING_SNAKE_CASE : int = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
SCREAMING_SNAKE_CASE : str = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
SCREAMING_SNAKE_CASE : Union[str, Any] = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]
SCREAMING_SNAKE_CASE : Dict = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
SCREAMING_SNAKE_CASE : Union[str, Any] = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
SCREAMING_SNAKE_CASE : Tuple = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]
SCREAMING_SNAKE_CASE : Optional[Any] = checkpoint['output_conv.1.weight_g']
SCREAMING_SNAKE_CASE : List[Any] = checkpoint['output_conv.1.weight_v']
SCREAMING_SNAKE_CASE : Union[str, Any] = checkpoint['output_conv.1.bias']
hf_model.remove_weight_norm()
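# Reader's note: weight normalisation is applied before copying so that the
# checkpoint's weight_g/weight_v parametrisation maps one-to-one onto the HF
# module's parameters, and removed afterwards so the converted model stores
# plain fused convolution weights.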
@torch.no_grad()
def lowercase__( __UpperCamelCase: str ,__UpperCamelCase: int ,__UpperCamelCase: Any ,__UpperCamelCase: str=None ,__UpperCamelCase: Tuple=None ,):
"""simple docstring"""
if config_path is not None:
SCREAMING_SNAKE_CASE : List[Any] = SpeechTaHifiGanConfig.from_pretrained(__UpperCamelCase )
else:
SCREAMING_SNAKE_CASE : Optional[int] = SpeechTaHifiGanConfig()
SCREAMING_SNAKE_CASE : Optional[Any] = SpeechTaHifiGan(__UpperCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.load(__UpperCamelCase )
load_weights(orig_checkpoint['model']['generator'] ,__UpperCamelCase ,__UpperCamelCase )
SCREAMING_SNAKE_CASE : int = np.load(__UpperCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = stats[0].reshape(-1 )
SCREAMING_SNAKE_CASE : Tuple = stats[1].reshape(-1 )
SCREAMING_SNAKE_CASE : Tuple = torch.from_numpy(__UpperCamelCase ).float()
SCREAMING_SNAKE_CASE : Optional[Any] = torch.from_numpy(__UpperCamelCase ).float()
model.save_pretrained(__UpperCamelCase )
if repo_id:
print('Pushing to the hub...' )
model.push_to_hub(__UpperCamelCase )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
UpperCamelCase_ = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
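# Illustrative invocation (script name and paths are placeholders):
#
#   python convert_hifigan_checkpoint.py \
#       --checkpoint_path ./generator.ckpt \
#       --stats_path ./stats.npy \
#       --pytorch_dump_folder_path ./speecht5_hifigan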
| 28
|
import argparse
import collections
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def __UpperCAmelCase ( a_ , a_ , a_ , a_="attention"):
snake_case_ = params[f'''{prefix}/layers_{i}/{layer_name}/key/kernel''']
snake_case_ = params[f'''{prefix}/layers_{i}/{layer_name}/out/kernel''']
snake_case_ = params[f'''{prefix}/layers_{i}/{layer_name}/query/kernel''']
snake_case_ = params[f'''{prefix}/layers_{i}/{layer_name}/value/kernel''']
return k, o, q, v
def __UpperCAmelCase ( a_ , a_ , a_ , a_=False):
if split_mlp_wi:
snake_case_ = params[f'''{prefix}/layers_{i}/mlp/wi_0/kernel''']
snake_case_ = params[f'''{prefix}/layers_{i}/mlp/wi_1/kernel''']
snake_case_ = (wi_a, wi_a)
else:
snake_case_ = params[f'''{prefix}/layers_{i}/mlp/wi/kernel''']
snake_case_ = params[f'''{prefix}/layers_{i}/mlp/wo/kernel''']
return wi, wo
def __UpperCAmelCase ( a_ , a_ , a_ , a_):
return params[f'''{prefix}/layers_{i}/{layer_name}/scale''']
def __UpperCAmelCase ( a_ , *, a_ , a_):
snake_case_ = traverse_util.flatten_dict(variables['target'])
snake_case_ = {'/'.join(a_): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
snake_case_ = 'encoder/layers_0/mlp/wi_0/kernel' in old
print('Split MLP:' , a_)
snake_case_ = collections.OrderedDict()
# Shared embeddings.
snake_case_ = old['token_embedder/embedding']
# Encoder.
for i in range(a_):
# Block i, layer 0 (Self Attention).
snake_case_ = tax_layer_norm_lookup(a_ , a_ , 'encoder' , 'pre_attention_layer_norm')
snake_case_ , snake_case_ , snake_case_ , snake_case_ = tax_attention_lookup(a_ , a_ , 'encoder' , 'attention')
snake_case_ = layer_norm
snake_case_ = k.T
snake_case_ = o.T
snake_case_ = q.T
snake_case_ = v.T
# Block i, layer 1 (MLP).
snake_case_ = tax_layer_norm_lookup(a_ , a_ , 'encoder' , 'pre_mlp_layer_norm')
snake_case_ , snake_case_ = tax_mlp_lookup(a_ , a_ , 'encoder' , a_)
snake_case_ = layer_norm
if split_mlp_wi:
snake_case_ = wi[0].T
snake_case_ = wi[1].T
else:
snake_case_ = wi.T
snake_case_ = wo.T
snake_case_ = old[
'encoder/relpos_bias/rel_embedding'
].T
snake_case_ = old['encoder/encoder_norm/scale']
if not is_encoder_only:
# Decoder.
for i in range(a_):
# Block i, layer 0 (Self Attention).
snake_case_ = tax_layer_norm_lookup(a_ , a_ , 'decoder' , 'pre_self_attention_layer_norm')
snake_case_ , snake_case_ , snake_case_ , snake_case_ = tax_attention_lookup(a_ , a_ , 'decoder' , 'self_attention')
snake_case_ = layer_norm
snake_case_ = k.T
snake_case_ = o.T
snake_case_ = q.T
snake_case_ = v.T
# Block i, layer 1 (Cross Attention).
snake_case_ = tax_layer_norm_lookup(a_ , a_ , 'decoder' , 'pre_cross_attention_layer_norm')
snake_case_ , snake_case_ , snake_case_ , snake_case_ = tax_attention_lookup(a_ , a_ , 'decoder' , 'encoder_decoder_attention')
snake_case_ = layer_norm
snake_case_ = k.T
snake_case_ = o.T
snake_case_ = q.T
snake_case_ = v.T
# Block i, layer 2 (MLP).
snake_case_ = tax_layer_norm_lookup(a_ , a_ , 'decoder' , 'pre_mlp_layer_norm')
snake_case_ , snake_case_ = tax_mlp_lookup(a_ , a_ , 'decoder' , a_)
snake_case_ = layer_norm
if split_mlp_wi:
snake_case_ = wi[0].T
snake_case_ = wi[1].T
else:
snake_case_ = wi.T
snake_case_ = wo.T
snake_case_ = old['decoder/decoder_norm/scale']
snake_case_ = old[
'decoder/relpos_bias/rel_embedding'
].T
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
snake_case_ = old['decoder/logits_dense/kernel'].T
return new
def __UpperCAmelCase ( a_ , a_):
snake_case_ = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
snake_case_ = state_dict['shared.weight']
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
snake_case_ = state_dict['shared.weight']
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print('Using shared word embeddings as lm_head.')
snake_case_ = state_dict['shared.weight']
return state_dict
def __UpperCAmelCase ( a_ , a_ , a_ , a_):
snake_case_ = checkpoints.load_tax_checkpoint(a_)
snake_case_ = convert_tax_to_pytorch(a_ , num_layers=config.num_layers , is_encoder_only=a_)
snake_case_ = make_state_dict(a_ , a_)
model.load_state_dict(a_ , strict=a_)
def __UpperCAmelCase ( a_ , a_ , a_ , a_ = False):
snake_case_ = TaConfig.from_json_file(a_)
print(f'''Building PyTorch model from configuration: {config}''')
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
snake_case_ = TaEncoderModel(a_)
else:
snake_case_ = TaForConditionalGeneration(a_)
    # Load weights from the T5X checkpoint
load_tax_weights_in_ta(a_ , a_ , a_ , a_)
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''')
model.save_pretrained(a_)
# Verify that we can load the checkpoint.
model.from_pretrained(a_)
print('Done')
if __name__ == "__main__":
lowercase = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
# Required parameters
parser.add_argument(
"--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
)
lowercase = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
    args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
)
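# Illustrative invocation (script name and paths are placeholders):
#
#   python convert_t5x_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint_1000000 \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path ./t5-converted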
| 198
| 0
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'Salesforce/blip-vqa-base': 'https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json',
'Salesforce/blip-vqa-capfit-large': (
'https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-base': (
'https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-large': (
'https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json'
),
'Salesforce/blip-itm-base-coco': 'https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json',
'Salesforce/blip-itm-large-coco': 'https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json',
'Salesforce/blip-itm-base-flikr': 'https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json',
'Salesforce/blip-itm-large-flikr': (
'https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json'
),
}
class _UpperCamelCase ( __A ):
'''simple docstring'''
lowerCamelCase__ ='blip_text_model'
def __init__( self : Any , a : Any=3_0524 , a : List[Any]=768 , a : List[Any]=768 , a : Optional[int]=3072 , a : Dict=768 , a : Optional[Any]=12 , a : str=8 , a : Tuple=512 , a : Dict="gelu" , a : List[Any]=1e-12 , a : Union[str, Any]=0.0 , a : List[str]=0.0 , a : Optional[Any]=0.02 , a : Optional[int]=3_0522 , a : Tuple=2 , a : Tuple=0 , a : Union[str, Any]=102 , a : Optional[int]=True , a : Any=True , **a : str , ) -> str:
"""simple docstring"""
super().__init__(
pad_token_id=a , bos_token_id=a , eos_token_id=a , sep_token_id=a , **a , )
SCREAMING_SNAKE_CASE : Optional[Any] = vocab_size
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_size
SCREAMING_SNAKE_CASE : int = encoder_hidden_size
SCREAMING_SNAKE_CASE : Tuple = intermediate_size
SCREAMING_SNAKE_CASE : str = projection_dim
SCREAMING_SNAKE_CASE : Any = hidden_dropout_prob
SCREAMING_SNAKE_CASE : List[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads
SCREAMING_SNAKE_CASE : Tuple = max_position_embeddings
SCREAMING_SNAKE_CASE : str = layer_norm_eps
SCREAMING_SNAKE_CASE : Dict = hidden_act
SCREAMING_SNAKE_CASE : Tuple = initializer_range
SCREAMING_SNAKE_CASE : Tuple = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = is_decoder
SCREAMING_SNAKE_CASE : int = use_cache
@classmethod
def __UpperCamelCase ( cls : str , a : Union[str, os.PathLike] , **a : List[str] ) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(a )
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : int = cls.get_config_dict(a , **a )
# get the text config dict if we are loading from BlipConfig
if config_dict.get("model_type" ) == "blip":
SCREAMING_SNAKE_CASE : Optional[Any] = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(a , **a )
class _UpperCamelCase ( __A ):
'''simple docstring'''
lowerCamelCase__ ='blip_vision_model'
def __init__( self : Union[str, Any] , a : Any=768 , a : Tuple=3072 , a : Dict=512 , a : Any=12 , a : Optional[Any]=12 , a : Any=384 , a : Tuple=16 , a : Dict="gelu" , a : Dict=1e-5 , a : Union[str, Any]=0.0 , a : Tuple=1e-10 , **a : Dict , ) -> Any:
"""simple docstring"""
super().__init__(**a )
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_size
SCREAMING_SNAKE_CASE : Optional[Any] = intermediate_size
SCREAMING_SNAKE_CASE : List[Any] = projection_dim
SCREAMING_SNAKE_CASE : Optional[int] = num_hidden_layers
SCREAMING_SNAKE_CASE : Union[str, Any] = num_attention_heads
SCREAMING_SNAKE_CASE : Optional[Any] = patch_size
SCREAMING_SNAKE_CASE : Optional[int] = image_size
SCREAMING_SNAKE_CASE : List[Any] = initializer_range
SCREAMING_SNAKE_CASE : Union[str, Any] = attention_dropout
SCREAMING_SNAKE_CASE : Dict = layer_norm_eps
SCREAMING_SNAKE_CASE : str = hidden_act
@classmethod
def __UpperCamelCase ( cls : int , a : Union[str, os.PathLike] , **a : List[Any] ) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(a )
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : List[str] = cls.get_config_dict(a , **a )
# get the vision config dict if we are loading from BlipConfig
if config_dict.get("model_type" ) == "blip":
SCREAMING_SNAKE_CASE : List[str] = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(a , **a )
class _UpperCamelCase ( __A ):
'''simple docstring'''
lowerCamelCase__ ='blip'
lowerCamelCase__ =True
def __init__( self : List[Any] , a : Any=None , a : List[Any]=None , a : Any=512 , a : List[str]=2.6592 , a : str=256 , **a : List[str] , ) -> Dict:
"""simple docstring"""
super().__init__(**a )
if text_config is None:
SCREAMING_SNAKE_CASE : str = {}
logger.info("`text_config` is `None`. Initializing the `BlipTextConfig` with default values." )
if vision_config is None:
SCREAMING_SNAKE_CASE : List[str] = {}
logger.info("`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values." )
SCREAMING_SNAKE_CASE : Optional[Any] = BlipTextConfig(**a )
SCREAMING_SNAKE_CASE : Dict = BlipVisionConfig(**a )
SCREAMING_SNAKE_CASE : Any = self.vision_config.hidden_size
SCREAMING_SNAKE_CASE : List[Any] = projection_dim
SCREAMING_SNAKE_CASE : Dict = logit_scale_init_value
SCREAMING_SNAKE_CASE : str = 1.0
SCREAMING_SNAKE_CASE : Optional[int] = 0.02
SCREAMING_SNAKE_CASE : str = image_text_hidden_size
@classmethod
def __UpperCamelCase ( cls : Optional[int] , a : BlipTextConfig , a : BlipVisionConfig , **a : str ) -> List[Any]:
"""simple docstring"""
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **a )
def __UpperCamelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE : List[Any] = self.text_config.to_dict()
SCREAMING_SNAKE_CASE : int = self.vision_config.to_dict()
SCREAMING_SNAKE_CASE : Dict = self.__class__.model_type
return output
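# Minimal usage sketch (illustrative; in the un-obfuscated transformers API the
# three classes above are BlipTextConfig, BlipVisionConfig and BlipConfig, and
# the combining classmethod is named `from_text_vision_configs`):
#
#   text_cfg = BlipTextConfig(vocab_size=30524)
#   vision_cfg = BlipVisionConfig(image_size=384)
#   cfg = BlipConfig.from_text_vision_configs(text_cfg, vision_cfg)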
| 193
|
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : Optional[Any] , a : int ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = size
SCREAMING_SNAKE_CASE : Union[str, Any] = [0] * size
SCREAMING_SNAKE_CASE : Union[str, Any] = [0] * size
@staticmethod
def __UpperCamelCase ( a : int ) -> int:
"""simple docstring"""
return index | (index + 1)
@staticmethod
def __UpperCamelCase ( a : int ) -> int:
"""simple docstring"""
return (index & (index + 1)) - 1
def __UpperCamelCase ( self : Any , a : int , a : int ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = value
while index < self.size:
SCREAMING_SNAKE_CASE : Dict = self.get_prev(a ) + 1
if current_left_border == index:
SCREAMING_SNAKE_CASE : Optional[int] = value
else:
SCREAMING_SNAKE_CASE : Tuple = max(a , a , a )
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_next(a )
def __UpperCamelCase ( self : Optional[int] , a : int , a : int ) -> int:
"""simple docstring"""
right -= 1 # Because of right is exclusive
SCREAMING_SNAKE_CASE : Optional[int] = 0
while left <= right:
SCREAMING_SNAKE_CASE : List[Any] = self.get_prev(a )
if left <= current_left:
SCREAMING_SNAKE_CASE : List[Any] = max(a , self.tree[right] )
SCREAMING_SNAKE_CASE : str = current_left
else:
SCREAMING_SNAKE_CASE : List[str] = max(a , self.arr[right] )
right -= 1
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
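# Reader's note (using de-obfuscated names): the class above is a max-Fenwick
# tree. update(index, value) writes arr[index] and refreshes the partial maxima
# while jumping upward via index | (index + 1); query(left, right) returns
# max(arr[left:right]) over the half-open range by walking right downward
# through (right & (right + 1)) - 1 blocks. Both operations run in O(log size).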
| 193
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A : List[str] = {
'configuration_luke': ['LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LukeConfig'],
'tokenization_luke': ['LukeTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : str = [
'LUKE_PRETRAINED_MODEL_ARCHIVE_LIST',
'LukeForEntityClassification',
'LukeForEntityPairClassification',
'LukeForEntitySpanClassification',
'LukeForMultipleChoice',
'LukeForQuestionAnswering',
'LukeForSequenceClassification',
'LukeForTokenClassification',
'LukeForMaskedLM',
'LukeModel',
'LukePreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
__A : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
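# Reader's note: this is the standard transformers lazy-import pattern. Under
# TYPE_CHECKING the symbols are imported eagerly so IDEs and type checkers can
# resolve them; at runtime the original pattern assigns the _LazyModule to
# sys.modules[__name__], so attributes are materialised from _import_structure
# on first access.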
| 394
|
'''simple docstring'''
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
__A : Optional[Any] = logging.getLogger(__name__)
def lowerCAmelCase_ ( ):
a__ = argparse.ArgumentParser(
description='Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.' )
parser.add_argument(
'--dataset_name' , type=a , default='wikitext' , help='Name of the training. Explore datasets at: hf.co/datasets.' , )
parser.add_argument(
'--dataset_config' , type=a , default='wikitext-103-raw-v1' , help='Configuration name of the dataset.' )
parser.add_argument(
'--tokenizer_name_or_path' , type=a , default='sayakpaul/unigram-tokenizer-wikitext' , help='Tokenizer identifier. Can be a local filepath or a Hub identifier.' , )
parser.add_argument(
'--shard_size' , type=a , default=1000 , help='Number of entries to go in a single shard.' , )
parser.add_argument('--split' , type=a , default='train' , choices=['train', 'test', 'validation'] )
parser.add_argument(
'--limit' , default=a , type=a , help='Limit the number of shards (used for debugging).' , )
parser.add_argument(
'--max_length' , type=a , default=512 , help='Maximum sequence length. For training on TPUs, it helps to have a maximum'
' sequence length that is a multiple of 8.' , )
parser.add_argument(
'--output_dir' , default='tf-tpu' , type=a , help='Output directory where the TFRecord shards will be saved. If the'
' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord'
' shards will be directly saved to a Google Cloud Storage bucket.' , )
a__ = parser.parse_args()
return args
def lowerCAmelCase_ ( a : int ):
def fn(a : int ):
return tokenizer(examples['text'] )
return fn
def lowerCAmelCase_ ( a : str ):
a__ = []
for i in range(len(tokenized_data['input_ids'] ) ):
a__ = {
'input_ids': tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data['input_ids'][i] ) ),
'attention_mask': tf.train.Feature(
intaa_list=tf.train.IntaaList(value=tokenized_data['attention_mask'][i] ) ),
}
a__ = tf.train.Features(feature=a )
a__ = tf.train.Example(features=a )
a__ = example.SerializeToString()
records.append(a )
return records
def lowerCAmelCase_ ( a : Any ):
a__ = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
if args.limit is not None:
a__ = min(len(a ) , args.limit )
a__ = dataset.select(range(a ) )
print(f'''Limiting the dataset to {args.limit} entries.''' )
a__ = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
a__ = os.path.join(args.output_dir , args.split )
if not os.path.exists(a ):
os.makedirs(a )
else:
a__ = os.path.join(args.output_dir , args.split )
# Tokenize the whole dataset at once.
a__ = tokenize_function(a )
a__ = dataset.map(a , batched=a , num_proc=4 , remove_columns=['text'] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
def group_texts(a : Optional[Any] ):
# Concatenate all texts.
a__ = {k: sum(examples[k] , [] ) for k in examples.keys()}
a__ = len(concatenated_examples[list(examples.keys() )[0]] )
# We drop the small remainder, though you could add padding instead if the model supports it
# In this, as in all things, we advise you to follow your heart 🫀
a__ = (total_length // args.max_length) * args.max_length
# Split by chunks of max_len.
a__ = {
k: [t[i : i + args.max_length] for i in range(0 , a , args.max_length )]
for k, t in concatenated_examples.items()
}
return result
a__ = dataset_tokenized.map(a , batched=a , batch_size=1000 , num_proc=4 )
a__ = 0
a__ = 0
for shard in range(0 , len(a ) , args.shard_size ):
a__ = grouped_dataset[shard : shard + args.shard_size]
a__ = len(dataset_snapshot['input_ids'] )
a__ = os.path.join(a , f'''dataset-{shard_count}-{records_containing}.tfrecord''' )
a__ = get_serialized_examples(a )
with tf.io.TFRecordWriter(a ) as out_file:
for i in range(len(a ) ):
a__ = serialized_examples[i]
out_file.write(a )
print('Wrote file {} containing {} records'.format(a , a ) )
shard_count += 1
total_records += records_containing
with open(f'''split-{args.split}-records-count.txt''' , 'w' ) as f:
print(f'''Total {args.split} records: {total_records}''' , file=a )
if __name__ == "__main__":
__A : str = parse_args()
main(args)
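# Illustrative invocation (script name and values are placeholders):
#
#   python prepare_tfrecord_shards.py \
#       --dataset_name wikitext --dataset_config wikitext-103-raw-v1 \
#       --split train --shard_size 1000 --max_length 512 --output_dir tf-tpu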
| 394
| 1
|
'''simple docstring'''
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class __SCREAMING_SNAKE_CASE (__snake_case ):
"""simple docstring"""
def __init__( self : Tuple ):
_a = []
def UpperCamelCase__ ( self : Tuple , __a : Tuple , __a : Tuple , __a : Tuple , **__a : Any ):
self.events.append("on_init_end" )
def UpperCamelCase__ ( self : Optional[Any] , __a : List[str] , __a : Union[str, Any] , __a : Dict , **__a : Optional[Any] ):
self.events.append("on_train_begin" )
def UpperCamelCase__ ( self : Tuple , __a : Dict , __a : List[str] , __a : Optional[int] , **__a : List[Any] ):
self.events.append("on_train_end" )
def UpperCamelCase__ ( self : List[str] , __a : Optional[int] , __a : Union[str, Any] , __a : Optional[int] , **__a : Any ):
self.events.append("on_epoch_begin" )
def UpperCamelCase__ ( self : List[Any] , __a : Tuple , __a : Tuple , __a : Union[str, Any] , **__a : List[str] ):
self.events.append("on_epoch_end" )
def UpperCamelCase__ ( self : Union[str, Any] , __a : Dict , __a : Any , __a : Dict , **__a : str ):
self.events.append("on_step_begin" )
def UpperCamelCase__ ( self : Tuple , __a : Union[str, Any] , __a : Tuple , __a : Tuple , **__a : Tuple ):
self.events.append("on_step_end" )
def UpperCamelCase__ ( self : Any , __a : str , __a : int , __a : List[Any] , **__a : Optional[int] ):
self.events.append("on_evaluate" )
def UpperCamelCase__ ( self : List[Any] , __a : List[str] , __a : Tuple , __a : str , **__a : List[Any] ):
self.events.append("on_predict" )
def UpperCamelCase__ ( self : Any , __a : Union[str, Any] , __a : Any , __a : Tuple , **__a : Any ):
self.events.append("on_save" )
def UpperCamelCase__ ( self : Optional[Any] , __a : Optional[int] , __a : Optional[int] , __a : List[str] , **__a : Union[str, Any] ):
self.events.append("on_log" )
def UpperCamelCase__ ( self : List[str] , __a : List[Any] , __a : Union[str, Any] , __a : Any , **__a : str ):
self.events.append("on_prediction_step" )
@require_torch
class __SCREAMING_SNAKE_CASE (unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__ ( self : Dict ):
_a = tempfile.mkdtemp()
def UpperCamelCase__ ( self : Optional[Any] ):
shutil.rmtree(self.output_dir )
def UpperCamelCase__ ( self : List[Any] , __a : Any=0 , __a : Dict=0 , __a : List[Any]=64 , __a : Optional[Any]=64 , __a : Optional[Any]=None , __a : List[str]=False , **__a : List[Any] ):
_a = RegressionDataset(length=A_ )
_a = RegressionDataset(length=A_ )
_a = RegressionModelConfig(a=A_ , b=A_ )
_a = RegressionPreTrainedModel(A_ )
_a = TrainingArguments(self.output_dir , disable_tqdm=A_ , report_to=[] , **A_ )
return Trainer(
A_ , A_ , train_dataset=A_ , eval_dataset=A_ , callbacks=A_ , )
def UpperCamelCase__ ( self : Union[str, Any] , __a : Tuple , __a : Tuple ):
self.assertEqual(len(A_ ) , len(A_ ) )
# Order doesn't matter
_a = sorted(A_ , key=lambda __a : cb.__name__ if isinstance(A_ , A_ ) else cb.__class__.__name__ )
_a = sorted(A_ , key=lambda __a : cb.__name__ if isinstance(A_ , A_ ) else cb.__class__.__name__ )
for cba, cba in zip(A_ , A_ ):
if isinstance(A_ , A_ ) and isinstance(A_ , A_ ):
self.assertEqual(A_ , A_ )
elif isinstance(A_ , A_ ) and not isinstance(A_ , A_ ):
self.assertEqual(A_ , cba.__class__ )
elif not isinstance(A_ , A_ ) and isinstance(A_ , A_ ):
self.assertEqual(cba.__class__ , A_ )
else:
self.assertEqual(A_ , A_ )
def UpperCamelCase__ ( self : Dict , __a : Optional[int] ):
_a = ["on_init_end", "on_train_begin"]
_a = 0
_a = len(trainer.get_eval_dataloader() )
_a = ["on_prediction_step"] * len(trainer.get_eval_dataloader() ) + ["on_log", "on_evaluate"]
for _ in range(trainer.state.num_train_epochs ):
expected_events.append("on_epoch_begin" )
for _ in range(A_ ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append("on_log" )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append("on_save" )
expected_events.append("on_epoch_end" )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
def UpperCamelCase__ ( self : List[str] ):
_a = self.get_trainer()
_a = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , A_ )
# Callbacks passed at init are added to the default callbacks
_a = self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(A_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , A_ )
        # TrainingArguments.disable_tqdm controls whether ProgressCallback or PrinterCallback is used
_a = self.get_trainer(disable_tqdm=A_ )
_a = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , A_ )
def UpperCamelCase__ ( self : Dict ):
_a = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
_a = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(A_ )
expected_callbacks.remove(A_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , A_ )
_a = self.get_trainer()
_a = trainer.pop_callback(A_ )
self.assertEqual(cb.__class__ , A_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , A_ )
trainer.add_callback(A_ )
expected_callbacks.insert(0 , A_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , A_ )
# We can also add, pop, or remove by instance
_a = self.get_trainer()
_a = trainer.callback_handler.callbacks[0]
trainer.remove_callback(A_ )
expected_callbacks.remove(A_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , A_ )
_a = self.get_trainer()
_a = trainer.callback_handler.callbacks[0]
_a = trainer.pop_callback(A_ )
self.assertEqual(A_ , A_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , A_ )
trainer.add_callback(A_ )
expected_callbacks.insert(0 , A_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , A_ )
def UpperCamelCase__ ( self : Tuple ):
import warnings
# XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
warnings.simplefilter(action="ignore" , category=A_ )
_a = self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
_a = trainer.callback_handler.callbacks[-2].events
self.assertEqual(A_ , self.get_expected_events(A_ ) )
# Independent log/save/eval
_a = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 )
trainer.train()
_a = trainer.callback_handler.callbacks[-2].events
self.assertEqual(A_ , self.get_expected_events(A_ ) )
_a = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 )
trainer.train()
_a = trainer.callback_handler.callbacks[-2].events
self.assertEqual(A_ , self.get_expected_events(A_ ) )
_a = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy="steps" )
trainer.train()
_a = trainer.callback_handler.callbacks[-2].events
self.assertEqual(A_ , self.get_expected_events(A_ ) )
_a = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy="epoch" )
trainer.train()
_a = trainer.callback_handler.callbacks[-2].events
self.assertEqual(A_ , self.get_expected_events(A_ ) )
# A bit of everything
_a = self.get_trainer(
callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy="steps" , )
trainer.train()
_a = trainer.callback_handler.callbacks[-2].events
self.assertEqual(A_ , self.get_expected_events(A_ ) )
# warning should be emitted for duplicated callbacks
with patch("transformers.trainer_callback.logger.warning" ) as warn_mock:
_a = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
assert str(A_ ) in warn_mock.call_args[0][0]
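# Reader's note: the trace reconstructed by get_expected_events above mirrors
# the Trainer's dispatch order: on_init_end, on_train_begin, then per epoch
# on_epoch_begin followed by on_step_begin/on_step_end for every step (with
# on_log, on_evaluate and on_save interleaved according to logging_steps,
# eval_steps/evaluation_strategy and save_steps), then on_epoch_end, and
# finally on_log and on_train_end.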
| 716
|
'''simple docstring'''
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def _lowerCamelCase ( lowercase : str , lowercase : int , lowercase : Any , lowercase : Any ) -> Any:
# Initialise PyTorch model
_a = FunnelConfig.from_json_file(lowercase )
print(F'Building PyTorch model from configuration: {config}' )
_a = FunnelBaseModel(lowercase ) if base_model else FunnelModel(lowercase )
# Load weights from tf checkpoint
load_tf_weights_in_funnel(lowercase , lowercase , lowercase )
# Save pytorch-model
print(F'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict() , lowercase )
if __name__ == "__main__":
lowerCAmelCase_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--base_model', action='store_true', help='Whether you want just the base model (no decoder) or not.'
)
lowerCAmelCase_ : Optional[Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
| 521
| 0
|
from __future__ import annotations
def UpperCamelCase ( _UpperCAmelCase : int | str ) -> bool:
'''simple docstring'''
_lowercase : str = str(_UpperCAmelCase )
return n == n[::-1]
def UpperCamelCase ( _UpperCAmelCase : int = 100_0000 ) -> Any:
'''simple docstring'''
_lowercase : Optional[Any] = 0
for i in range(1 , _UpperCAmelCase ):
if is_palindrome(_UpperCAmelCase ) and is_palindrome(bin(_UpperCAmelCase ).split("b" )[1] ):
total += i
return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
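# Reader's note: this solves Project Euler problem 36, the sum of all numbers
# below the limit that are palindromic in both base 10 and base 2 (for example,
# 585 = 0b1001001001 reads the same forwards and backwards in both bases).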
| 461
|
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class __lowercase :
_A = field(
default="codeparrot/codeparrot" , metadata={"help": "Model name or path of model to be trained."} )
_A = field(
default="./" , metadata={"help": "Save dir where model repo is cloned and models updates are saved to."} )
_A = field(
default="codeparrot/codeparrot-clean-train" , metadata={"help": "Name or path of training dataset."} )
_A = field(
default="codeparrot/codeparrot-clean-valid" , metadata={"help": "Name or path of validation dataset."} )
_A = field(default=2 , metadata={"help": "Batch size for training."} )
_A = field(default=2 , metadata={"help": "Batch size for evaluation."} )
_A = field(default=0.1 , metadata={"help": "Value of weight decay."} )
_A = field(
default=10000 , metadata={"help": "Size of buffer used to shuffle streaming dataset."} )
    _A = field(default=2e-4 , metadata={"help": "Learning rate for training."} )
_A = field(default="cosine" , metadata={"help": "Learning rate."} )
_A = field(
default=750 , metadata={"help": "Number of warmup steps in the learning rate schedule."} )
_A = field(
default=16 , metadata={"help": "Number of gradient accumulation steps."} )
_A = field(
default=__snake_case , metadata={"help": "Use gradient checkpointing to reduce memory footprint."} )
_A = field(default=50000 , metadata={"help": "Maximum number of training steps."} )
_A = field(
default=-1 , metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."} )
_A = field(default=1024 , metadata={"help": "Sequence lengths used for training."} )
_A = field(default=1 , metadata={"help": "Training seed."} )
_A = field(
default=1024 , metadata={"help": "Interval to save checkpoints. Measured as number of forward passes not training steps."} , )
_A = field(
default=__snake_case , metadata={"help": "States path if the training should continue from a checkpoint folder."} )
_A = field(default=__snake_case , metadata={"help": "If True the data is pretokenized."} )
@dataclass
class __lowercase :
_A = field(
default="codeparrot/codeparrot" , metadata={"help": "Model name or path of model to be evaluated."} )
_A = field(
default="codeparrot/codeparrot-clean-valid" , metadata={"help": "Name or path of validation dataset."} )
_A = field(default=2 , metadata={"help": "Batch size used for evaluation."} )
_A = field(
default=-1 , metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."} )
_A = field(default=1024 , metadata={"help": "Length of sequences to be evaluated."} )
_A = field(default=1 , metadata={"help": "Random seed used for evaluation."} )
@dataclass
class __lowercase :
_A = field(
default="codeparrot/codeparrot" , metadata={"help": "Model name or path of model to be evaluated."} )
_A = field(default=__snake_case , metadata={"help": "Number of workers used for code evaluation."} )
_A = field(
default=__snake_case , metadata={"help": "The number of human-eval tasks to run. If not included all tasks are evaluated."} , )
_A = field(
default=__snake_case , metadata={"help": "Sample from the language model's output distribution."} )
_A = field(default=0.2 , metadata={"help": "Sampling temperature used for generation."} )
_A = field(default=256 , metadata={"help": "Maximum number of newly generated tokens."} )
_A = field(default=0 , metadata={"help": "Top-k parameter used for generation."} )
_A = field(default=0.95 , metadata={"help": "Top-p parameter used for nucleus sampling."} )
_A = field(default=10 , metadata={"help": "Number of generations to run in parallel."} )
_A = field(
default=200 , metadata={"help": "Number of completions to generate for each sample."} )
_A = field(default=1 , metadata={"help": "Random seed used for evaluation."} )
_A = field(
default="eval_results.json" , metadata={"help": "Random seed used for evaluation."} )
_A = field(
default="0" , metadata={"help": "Allow `code_eval` to execute Python code on machine"} )
_A = field(
default=-1 , metadata={
"help": (
"Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive"
" number corresponds to which GPU device id to run on."
)
} , )
@dataclass
class __lowercase :
_A = field(
default=__snake_case , metadata={
"help": "The number of CPU cores to use for parallel preprocessing. Default uses the maximum available."
} , )
_A = field(
default="transformersbook/codeparrot" , metadata={"help": "Folder or name of dataset to process."} )
_A = field(
default="codeparrot-clean" , metadata={"help": "Folder to save processed processed dataset."} )
_A = field(
default=100000 , metadata={"help": "Number of files to save per JSON output file."} )
_A = field(default="content" , metadata={"help": "Column containing text data to process."} )
_A = field(
default=1000 , metadata={"help": "Maximum line length in file, otherwise file is filtered."} )
_A = field(
default=100 , metadata={"help": "Maximum mean line length in file, otherwise file is filtered."} )
_A = field(
default=0.25 , metadata={"help": "Maximum fraction of non-alphanumeric characters, otherwise file is filtered."} )
_A = field(
default=1.5 , metadata={"help": "Minimum character token ratio for the file, otherwise file is filtered."} )
_A = field(
default=0.7 , metadata={"help": "Probability for filtering config, test and uncommon files."} )
_A = field(
default="codeparrot/codeparrot" , metadata={"help": "Name or path to the tokenizer."} , )
_A = field(
default=__snake_case , metadata={"help": "If True, near-duplicate samples are removed."} )
_A = field(
default=0.85 , metadata={"help": "Jaccard threshold for near-duplicate samples."} )
@dataclass
class __lowercase :
_A = field(
default="gpt2" , metadata={"help": "Base tokenizer to build new tokenizer from."} )
_A = field(
default="transformersbook/codeparrot-train" , metadata={"help": "Dataset to train tokenizer on."} )
_A = field(default="content" , metadata={"help": "Column containing text data to process."} )
_A = field(default=200000 , metadata={"help": "Number of examples to train tokenizer on."} )
_A = field(
        default=32768 , metadata={"help": "Target vocabulary size of the new tokenizer."} )
_A = field(default="codeparrot" , metadata={"help": "Name of new tokenizer."} )
_A = field(default=__snake_case , metadata={"help": "Push saved tokenizer to the hub."} )
@dataclass
class __lowercase :
_A = field(
default="codeparrot/codeparrot" , metadata={"help": "Name or path to the tokenizer."} )
_A = field(
default="codeparrot/codeparrot-clean-train" , metadata={"help": "Name or path to the dataset to pretokenize."} )
_A = field(
default="tokenized-codeparrot-train" , metadata={"help": "Repo name of the pretokenized data."} )
_A = field(default=__snake_case , metadata={"help": "Number of workers used for code evaluation."} )
@dataclass
class __lowercase :
_A = field(
default="gpt2-large" , metadata={"help": "Configuration to use for model initialization."} )
_A = field(
default="codeparrot/codeparrot" , metadata={"help": "Tokenizer attached to model."} )
_A = field(default="codeparrot" , metadata={"help": "Name of the created model."} )
_A = field(default=__snake_case , metadata={"help": "Push saved tokenizer to the hub."} )
| 461
| 1
|
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def __a ( lowerCAmelCase__ : Dict , lowerCAmelCase__ : Any , lowerCAmelCase__ : Any , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Dict ):
# Load configuration defined in the metadata file
with open(lowerCAmelCase__ ) as metadata_file:
a__ : Union[str, Any] = json.load(lowerCAmelCase__ )
a__ : str = LukeConfig(use_entity_aware_attention=lowerCAmelCase__ , **metadata['''model_config'''] )
# Load in the weights from the checkpoint_path
a__ : Tuple = torch.load(lowerCAmelCase__ , map_location='''cpu''' )['''module''']
# Load the entity vocab file
a__ : str = load_original_entity_vocab(lowerCAmelCase__ )
# add an entry for [MASK2]
a__ : List[str] = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
a__ : Dict = XLMRobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )
# Add special tokens to the token vocabulary for downstream tasks
a__ : Union[str, Any] = AddedToken('''<ent>''' , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ )
a__ : Union[str, Any] = AddedToken('''<ent2>''' , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ )
tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F'Saving tokenizer to {pytorch_dump_folder_path}' )
tokenizer.save_pretrained(lowerCAmelCase__ )
with open(os.path.join(lowerCAmelCase__ , '''tokenizer_config.json''' ) , '''r''' ) as f:
a__ : Union[str, Any] = json.load(lowerCAmelCase__ )
a__ : List[str] = '''MLukeTokenizer'''
with open(os.path.join(lowerCAmelCase__ , '''tokenizer_config.json''' ) , '''w''' ) as f:
json.dump(lowerCAmelCase__ , lowerCAmelCase__ )
with open(os.path.join(lowerCAmelCase__ , MLukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) , '''w''' ) as f:
json.dump(lowerCAmelCase__ , lowerCAmelCase__ )
a__ : Tuple = MLukeTokenizer.from_pretrained(lowerCAmelCase__ )
# Initialize the embeddings of the special tokens
a__ : Optional[int] = tokenizer.convert_tokens_to_ids(['''@'''] )[0]
a__ : str = tokenizer.convert_tokens_to_ids(['''#'''] )[0]
a__ : Any = state_dict['''embeddings.word_embeddings.weight''']
a__ : str = word_emb[ent_init_index].unsqueeze(0 )
a__ : Optional[int] = word_emb[enta_init_index].unsqueeze(0 )
a__ : List[str] = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
a__ : Union[str, Any] = state_dict[bias_name]
a__ : Tuple = decoder_bias[ent_init_index].unsqueeze(0 )
a__ : Optional[Any] = decoder_bias[enta_init_index].unsqueeze(0 )
a__ : List[str] = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
a__ : List[Any] = F'encoder.layer.{layer_index}.attention.self.'
a__ : str = state_dict[prefix + matrix_name]
a__ : Optional[Any] = state_dict[prefix + matrix_name]
a__ : Optional[Any] = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
a__ : Union[str, Any] = state_dict['''entity_embeddings.entity_embeddings.weight''']
a__ : Union[str, Any] = entity_emb[entity_vocab['''[MASK]''']].unsqueeze(0 )
a__ : List[str] = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
a__ : List[Any] = state_dict['''entity_predictions.bias''']
a__ : str = entity_prediction_bias[entity_vocab['''[MASK]''']].unsqueeze(0 )
a__ : Optional[Any] = torch.cat([entity_prediction_bias, entity_mask_bias] )
a__ : Optional[Any] = LukeForMaskedLM(config=lowerCAmelCase__ ).eval()
state_dict.pop('''entity_predictions.decoder.weight''' )
state_dict.pop('''lm_head.decoder.weight''' )
state_dict.pop('''lm_head.decoder.bias''' )
a__ : Optional[Any] = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith('''lm_head''' ) or key.startswith('''entity_predictions''' )):
a__ : Dict = state_dict[key]
else:
a__ : List[Any] = state_dict[key]
a__ , a__ : List[str] = model.load_state_dict(lowerCAmelCase__ , strict=lowerCAmelCase__ )
if set(lowerCAmelCase__ ) != {"luke.embeddings.position_ids"}:
raise ValueError(F'Unexpected unexpected_keys: {unexpected_keys}' )
if set(lowerCAmelCase__ ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F'Unexpected missing_keys: {missing_keys}' )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
a__ : Any = MLukeTokenizer.from_pretrained(lowerCAmelCase__ , task='''entity_classification''' )
a__ : Optional[int] = '''ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).'''
a__ : List[str] = (0, 9)
a__ : Tuple = tokenizer(lowerCAmelCase__ , entity_spans=[span] , return_tensors='''pt''' )
a__ : Tuple = model(**lowerCAmelCase__ )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
a__ : Union[str, Any] = torch.Size((1, 33, 768) )
a__ : str = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCAmelCase__ , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
a__ : Tuple = torch.Size((1, 1, 768) )
a__ : Any = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'
F' {expected_shape}' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , lowerCAmelCase__ , atol=1E-4 ):
raise ValueError
# Verify masked word/entity prediction
a__ : List[Any] = MLukeTokenizer.from_pretrained(lowerCAmelCase__ )
a__ : Tuple = '''Tokyo is the capital of <mask>.'''
a__ : str = (24, 30)
a__ : Tuple = tokenizer(lowerCAmelCase__ , entity_spans=[span] , return_tensors='''pt''' )
a__ : Any = model(**lowerCAmelCase__ )
a__ : Optional[int] = encoding['''input_ids'''][0].tolist()
a__ : Dict = input_ids.index(tokenizer.convert_tokens_to_ids('''<mask>''' ) )
a__ : int = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(lowerCAmelCase__ )
a__ : Optional[int] = outputs.entity_logits[0][0].argmax().item()
a__ : Union[str, Any] = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith('''en:''' )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print('''Saving PyTorch model to {}'''.format(lowerCAmelCase__ ) )
model.save_pretrained(lowerCAmelCase__ )
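# The special-token initialization above is, at its core, appending rows to an
# embedding matrix; a minimal standalone sketch of that pattern:
import torch

toy_word_emb = torch.randn(4, 8)                 # toy (vocab=4, hidden=8) matrix
new_row = toy_word_emb[2].unsqueeze(0)           # reuse an existing row's weights
extended = torch.cat([toy_word_emb, new_row])    # vocabulary grows from 4 to 5
assert extended.shape == (5, 8)
assert torch.equal(extended[4], toy_word_emb[2])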
def __a ( lowerCAmelCase__ : str ):
a__ : int = ['''[MASK]''', '''[PAD]''', '''[UNK]''']
a__ : Optional[Any] = [json.loads(lowerCAmelCase__ ) for line in open(lowerCAmelCase__ )]
a__ : Any = {}
for entry in data:
a__ : Tuple = entry['''id''']
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
a__ : Union[str, Any] = entity_id
break
a__ : Dict = F'{language}:{entity_name}'
a__ : Dict = entity_id
return new_mapping
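# Hypothetical illustration (not taken from a real entity_vocab file) of the
# JSON-lines format the loader above expects, one object per line:
#   {"id": 0, "entities": [["[PAD]", "none"]]}
#   {"id": 1, "entities": [["Japan", "en"], ["日本", "ja"]]}
# which would produce {"[PAD]": 0, "en:Japan": 1, "ja:日本": 1}.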
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
__SCREAMING_SNAKE_CASE = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
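# Example invocation (script name and all paths are hypothetical):
#   python convert_mluke_checkpoint.py \
#       --checkpoint_path ./pytorch_model.bin \
#       --metadata_path ./metadata.json \
#       --entity_vocab_path ./entity_vocab.jsonl \
#       --pytorch_dump_folder_path ./mluke-base \
#       --model_size base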
| 709
|
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def __a ( lowerCAmelCase__ : Dict , lowerCAmelCase__ : Any , lowerCAmelCase__ : Any , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Dict ):
# Load configuration defined in the metadata file
with open(lowerCAmelCase__ ) as metadata_file:
a__ : Union[str, Any] = json.load(lowerCAmelCase__ )
a__ : str = LukeConfig(use_entity_aware_attention=lowerCAmelCase__ , **metadata['''model_config'''] )
# Load in the weights from the checkpoint_path
a__ : Tuple = torch.load(lowerCAmelCase__ , map_location='''cpu''' )['''module''']
# Load the entity vocab file
a__ : str = load_original_entity_vocab(lowerCAmelCase__ )
# add an entry for [MASK2]
a__ : List[str] = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
a__ : Dict = XLMRobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )
# Add special tokens to the token vocabulary for downstream tasks
a__ : Union[str, Any] = AddedToken('''<ent>''' , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ )
a__ : Union[str, Any] = AddedToken('''<ent2>''' , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ )
tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F'Saving tokenizer to {pytorch_dump_folder_path}' )
tokenizer.save_pretrained(lowerCAmelCase__ )
with open(os.path.join(lowerCAmelCase__ , '''tokenizer_config.json''' ) , '''r''' ) as f:
a__ : Union[str, Any] = json.load(lowerCAmelCase__ )
a__ : List[str] = '''MLukeTokenizer'''
with open(os.path.join(lowerCAmelCase__ , '''tokenizer_config.json''' ) , '''w''' ) as f:
json.dump(lowerCAmelCase__ , lowerCAmelCase__ )
with open(os.path.join(lowerCAmelCase__ , MLukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) , '''w''' ) as f:
json.dump(lowerCAmelCase__ , lowerCAmelCase__ )
a__ : Tuple = MLukeTokenizer.from_pretrained(lowerCAmelCase__ )
# Initialize the embeddings of the special tokens
a__ : Optional[int] = tokenizer.convert_tokens_to_ids(['''@'''] )[0]
a__ : str = tokenizer.convert_tokens_to_ids(['''#'''] )[0]
a__ : Any = state_dict['''embeddings.word_embeddings.weight''']
a__ : str = word_emb[ent_init_index].unsqueeze(0 )
a__ : Optional[int] = word_emb[enta_init_index].unsqueeze(0 )
a__ : List[str] = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
a__ : Union[str, Any] = state_dict[bias_name]
a__ : Tuple = decoder_bias[ent_init_index].unsqueeze(0 )
a__ : Optional[Any] = decoder_bias[enta_init_index].unsqueeze(0 )
a__ : List[str] = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
a__ : List[Any] = F'encoder.layer.{layer_index}.attention.self.'
a__ : str = state_dict[prefix + matrix_name]
a__ : Optional[Any] = state_dict[prefix + matrix_name]
a__ : Optional[Any] = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
a__ : Union[str, Any] = state_dict['''entity_embeddings.entity_embeddings.weight''']
a__ : Union[str, Any] = entity_emb[entity_vocab['''[MASK]''']].unsqueeze(0 )
a__ : List[str] = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
a__ : List[Any] = state_dict['''entity_predictions.bias''']
a__ : str = entity_prediction_bias[entity_vocab['''[MASK]''']].unsqueeze(0 )
a__ : Optional[Any] = torch.cat([entity_prediction_bias, entity_mask_bias] )
a__ : Optional[Any] = LukeForMaskedLM(config=lowerCAmelCase__ ).eval()
state_dict.pop('''entity_predictions.decoder.weight''' )
state_dict.pop('''lm_head.decoder.weight''' )
state_dict.pop('''lm_head.decoder.bias''' )
a__ : Optional[Any] = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith('''lm_head''' ) or key.startswith('''entity_predictions''' )):
a__ : Dict = state_dict[key]
else:
a__ : List[Any] = state_dict[key]
a__ , a__ : List[str] = model.load_state_dict(lowerCAmelCase__ , strict=lowerCAmelCase__ )
if set(lowerCAmelCase__ ) != {"luke.embeddings.position_ids"}:
raise ValueError(F'Unexpected unexpected_keys: {unexpected_keys}' )
if set(lowerCAmelCase__ ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F'Unexpected missing_keys: {missing_keys}' )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
a__ : Any = MLukeTokenizer.from_pretrained(lowerCAmelCase__ , task='''entity_classification''' )
a__ : Optional[int] = '''ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).'''
a__ : List[str] = (0, 9)
a__ : Tuple = tokenizer(lowerCAmelCase__ , entity_spans=[span] , return_tensors='''pt''' )
a__ : Tuple = model(**lowerCAmelCase__ )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
a__ : Union[str, Any] = torch.Size((1, 33, 768) )
a__ : str = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCAmelCase__ , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
a__ : Tuple = torch.Size((1, 1, 768) )
a__ : Any = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'
F' {expected_shape}' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , lowerCAmelCase__ , atol=1E-4 ):
raise ValueError
# Verify masked word/entity prediction
a__ : List[Any] = MLukeTokenizer.from_pretrained(lowerCAmelCase__ )
a__ : Tuple = '''Tokyo is the capital of <mask>.'''
a__ : str = (24, 30)
a__ : Tuple = tokenizer(lowerCAmelCase__ , entity_spans=[span] , return_tensors='''pt''' )
a__ : Any = model(**lowerCAmelCase__ )
a__ : Optional[int] = encoding['''input_ids'''][0].tolist()
a__ : Dict = input_ids.index(tokenizer.convert_tokens_to_ids('''<mask>''' ) )
a__ : int = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(lowerCAmelCase__ )
a__ : Optional[int] = outputs.entity_logits[0][0].argmax().item()
a__ : Union[str, Any] = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith('''en:''' )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print('''Saving PyTorch model to {}'''.format(lowerCAmelCase__ ) )
model.save_pretrained(lowerCAmelCase__ )
def __a ( lowerCAmelCase__ : str ):
a__ : int = ['''[MASK]''', '''[PAD]''', '''[UNK]''']
a__ : Optional[Any] = [json.loads(lowerCAmelCase__ ) for line in open(lowerCAmelCase__ )]
a__ : Any = {}
for entry in data:
a__ : Tuple = entry['''id''']
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
a__ : Union[str, Any] = entity_id
break
a__ : Dict = F'{language}:{entity_name}'
a__ : Dict = entity_id
return new_mapping
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
__SCREAMING_SNAKE_CASE = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 340
| 0
|
'''simple docstring'''
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCamelCase_ ( __A ):
_lowerCAmelCase : Union[str, Any] = (PNDMScheduler,)
_lowerCAmelCase : List[str] = (('num_inference_steps', 5_0),)
def __lowercase ( self : List[Any] , **lowerCAmelCase__ : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = {
"num_train_timesteps": 10_00,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
}
config.update(**lowerCAmelCase__ )
return config
def __lowercase ( self : Optional[int] , lowerCAmelCase__ : List[str]=0 , **lowerCAmelCase__ : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = dict(self.forward_default_kwargs )
SCREAMING_SNAKE_CASE : Union[str, Any] = kwargs.pop('''num_inference_steps''' , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : str = self.dummy_sample
SCREAMING_SNAKE_CASE : Union[str, Any] = 0.1 * sample
SCREAMING_SNAKE_CASE : Any = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_scheduler_config(**lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : Any = scheduler_class(**lowerCAmelCase__ )
scheduler.set_timesteps(lowerCAmelCase__ )
# copy over dummy past residuals
SCREAMING_SNAKE_CASE : str = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = scheduler_class.from_pretrained(lowerCAmelCase__ )
new_scheduler.set_timesteps(lowerCAmelCase__ )
# copy over dummy past residuals
SCREAMING_SNAKE_CASE : Optional[Any] = dummy_past_residuals[:]
SCREAMING_SNAKE_CASE : Dict = scheduler.step_prk(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ).prev_sample
SCREAMING_SNAKE_CASE : int = new_scheduler.step_prk(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
SCREAMING_SNAKE_CASE : List[Any] = scheduler.step_plms(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ).prev_sample
SCREAMING_SNAKE_CASE : Optional[Any] = new_scheduler.step_plms(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __lowercase ( self : Optional[int] ):
"""simple docstring"""
pass
def __lowercase ( self : List[str] , lowerCAmelCase__ : Optional[int]=0 , **lowerCAmelCase__ : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = dict(self.forward_default_kwargs )
SCREAMING_SNAKE_CASE : List[Any] = kwargs.pop('''num_inference_steps''' , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = self.dummy_sample
SCREAMING_SNAKE_CASE : Union[str, Any] = 0.1 * sample
SCREAMING_SNAKE_CASE : Any = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : Optional[int] = scheduler_class(**lowerCAmelCase__ )
scheduler.set_timesteps(lowerCAmelCase__ )
# copy over dummy past residuals (must be after setting timesteps)
SCREAMING_SNAKE_CASE : Dict = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : Any = scheduler_class.from_pretrained(lowerCAmelCase__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(lowerCAmelCase__ )
# copy over dummy past residual (must be after setting timesteps)
SCREAMING_SNAKE_CASE : int = dummy_past_residuals[:]
SCREAMING_SNAKE_CASE : Optional[Any] = scheduler.step_prk(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ).prev_sample
SCREAMING_SNAKE_CASE : Union[str, Any] = new_scheduler.step_prk(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
SCREAMING_SNAKE_CASE : str = scheduler.step_plms(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ).prev_sample
SCREAMING_SNAKE_CASE : List[str] = new_scheduler.step_plms(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __lowercase ( self : Optional[int] , **lowerCAmelCase__ : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : List[str] = self.get_scheduler_config(**lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : str = scheduler_class(**lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = 10
SCREAMING_SNAKE_CASE : List[Any] = self.dummy_model()
SCREAMING_SNAKE_CASE : Optional[int] = self.dummy_sample_deter
scheduler.set_timesteps(lowerCAmelCase__ )
for i, t in enumerate(scheduler.prk_timesteps ):
SCREAMING_SNAKE_CASE : Union[str, Any] = model(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : Any = scheduler.step_prk(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
SCREAMING_SNAKE_CASE : Dict = model(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : List[str] = scheduler.step_plms(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ).prev_sample
return sample
def __lowercase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = dict(self.forward_default_kwargs )
SCREAMING_SNAKE_CASE : int = kwargs.pop('''num_inference_steps''' , lowerCAmelCase__ )
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE : Dict = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : str = scheduler_class(**lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : List[str] = self.dummy_sample
SCREAMING_SNAKE_CASE : Any = 0.1 * sample
if num_inference_steps is not None and hasattr(lowerCAmelCase__ , '''set_timesteps''' ):
scheduler.set_timesteps(lowerCAmelCase__ )
elif num_inference_steps is not None and not hasattr(lowerCAmelCase__ , '''set_timesteps''' ):
SCREAMING_SNAKE_CASE : str = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
SCREAMING_SNAKE_CASE : List[Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
SCREAMING_SNAKE_CASE : Dict = dummy_past_residuals[:]
SCREAMING_SNAKE_CASE : Any = scheduler.step_prk(lowerCAmelCase__ , 0 , lowerCAmelCase__ , **lowerCAmelCase__ ).prev_sample
SCREAMING_SNAKE_CASE : str = scheduler.step_prk(lowerCAmelCase__ , 1 , lowerCAmelCase__ , **lowerCAmelCase__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
SCREAMING_SNAKE_CASE : List[str] = scheduler.step_plms(lowerCAmelCase__ , 0 , lowerCAmelCase__ , **lowerCAmelCase__ ).prev_sample
SCREAMING_SNAKE_CASE : int = scheduler.step_plms(lowerCAmelCase__ , 1 , lowerCAmelCase__ , **lowerCAmelCase__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def __lowercase ( self : Dict ):
"""simple docstring"""
for timesteps in [1_00, 10_00]:
self.check_over_configs(num_train_timesteps=lowerCAmelCase__ )
def __lowercase ( self : List[Any] ):
"""simple docstring"""
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : Tuple = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : List[Any] = self.get_scheduler_config(steps_offset=1 )
SCREAMING_SNAKE_CASE : List[str] = scheduler_class(**lowerCAmelCase__ )
scheduler.set_timesteps(10 )
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[9_01, 8_51, 8_51, 8_01, 8_01, 7_51, 7_51, 7_01, 7_01, 6_51, 6_51, 6_01, 6_01, 5_01, 4_01, 3_01, 2_01, 1_01, 1] ) , )
def __lowercase ( self : Dict ):
"""simple docstring"""
for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02] ):
self.check_over_configs(beta_start=lowerCAmelCase__ , beta_end=lowerCAmelCase__ )
def __lowercase ( self : Optional[Any] ):
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowerCAmelCase__ )
def __lowercase ( self : Optional[Any] ):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCAmelCase__ )
def __lowercase ( self : Any ):
"""simple docstring"""
for t in [1, 5, 10]:
self.check_over_forward(time_step=lowerCAmelCase__ )
def __lowercase ( self : Optional[Any] ):
"""simple docstring"""
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 1_00] ):
self.check_over_forward(num_inference_steps=lowerCAmelCase__ )
def __lowercase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = 27
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE : str = self.dummy_sample
SCREAMING_SNAKE_CASE : Tuple = 0.1 * sample
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : Union[str, Any] = scheduler_class(**lowerCAmelCase__ )
scheduler.set_timesteps(lowerCAmelCase__ )
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
SCREAMING_SNAKE_CASE : Dict = scheduler.step_prk(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ).prev_sample
def __lowercase ( self : Optional[int] ):
"""simple docstring"""
with self.assertRaises(lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE : List[Any] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : List[str] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : Optional[Any] = scheduler_class(**lowerCAmelCase__ )
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
def __lowercase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = self.full_loop()
SCREAMING_SNAKE_CASE : List[Any] = torch.sum(torch.abs(lowerCAmelCase__ ) )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.mean(torch.abs(lowerCAmelCase__ ) )
assert abs(result_sum.item() - 198.1318 ) < 1e-2
assert abs(result_mean.item() - 0.2580 ) < 1e-3
def __lowercase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = self.full_loop(prediction_type='''v_prediction''' )
SCREAMING_SNAKE_CASE : List[str] = torch.sum(torch.abs(lowerCAmelCase__ ) )
SCREAMING_SNAKE_CASE : List[str] = torch.mean(torch.abs(lowerCAmelCase__ ) )
assert abs(result_sum.item() - 67.3986 ) < 1e-2
assert abs(result_mean.item() - 0.0878 ) < 1e-3
def __lowercase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = self.full_loop(set_alpha_to_one=lowerCAmelCase__ , beta_start=0.01 )
SCREAMING_SNAKE_CASE : List[str] = torch.sum(torch.abs(lowerCAmelCase__ ) )
SCREAMING_SNAKE_CASE : Optional[int] = torch.mean(torch.abs(lowerCAmelCase__ ) )
assert abs(result_sum.item() - 230.0399 ) < 1e-2
assert abs(result_mean.item() - 0.2995 ) < 1e-3
def __lowercase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = self.full_loop(set_alpha_to_one=lowerCAmelCase__ , beta_start=0.01 )
SCREAMING_SNAKE_CASE : Optional[int] = torch.sum(torch.abs(lowerCAmelCase__ ) )
SCREAMING_SNAKE_CASE : Dict = torch.mean(torch.abs(lowerCAmelCase__ ) )
assert abs(result_sum.item() - 186.9482 ) < 1e-2
assert abs(result_mean.item() - 0.2434 ) < 1e-3
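# A minimal denoising loop with the scheduler under test; the random tensor
# stands in for a real UNet's output (a sketch, not a working diffusion model):
import torch
from diffusers import PNDMScheduler

scheduler = PNDMScheduler(num_train_timesteps=1000, beta_schedule="linear")
scheduler.set_timesteps(50)
sample = torch.randn(1, 3, 8, 8)
for t in scheduler.timesteps:
    model_output = torch.randn_like(sample)      # placeholder for unet(sample, t)
    sample = scheduler.step(model_output, t, sample).prev_sample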
| 527
|
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _UpperCamelCase ( __A , unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def __UpperCamelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def __UpperCamelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = ort.SessionOptions()
SCREAMING_SNAKE_CASE : Union[str, Any] = False
return options
def __UpperCamelCase ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png" )
SCREAMING_SNAKE_CASE : Optional[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
SCREAMING_SNAKE_CASE : int = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting" , revision="onnx" , safety_checker=a , feature_extractor=a , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=a )
SCREAMING_SNAKE_CASE : Optional[Any] = "A red cat sitting on a park bench"
SCREAMING_SNAKE_CASE : Tuple = np.random.RandomState(0 )
SCREAMING_SNAKE_CASE : Optional[int] = pipe(
prompt=a , image=a , mask_image=a , guidance_scale=7.5 , num_inference_steps=10 , generator=a , output_type="np" , )
SCREAMING_SNAKE_CASE : List[Any] = output.images
SCREAMING_SNAKE_CASE : Union[str, Any] = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : int = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def __UpperCamelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png" )
SCREAMING_SNAKE_CASE : Optional[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
SCREAMING_SNAKE_CASE : Optional[Any] = LMSDiscreteScheduler.from_pretrained(
"runwayml/stable-diffusion-inpainting" , subfolder="scheduler" , revision="onnx" )
SCREAMING_SNAKE_CASE : Union[str, Any] = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting" , revision="onnx" , scheduler=a , safety_checker=a , feature_extractor=a , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=a )
SCREAMING_SNAKE_CASE : List[Any] = "A red cat sitting on a park bench"
SCREAMING_SNAKE_CASE : Dict = np.random.RandomState(0 )
SCREAMING_SNAKE_CASE : Tuple = pipe(
prompt=a , image=a , mask_image=a , guidance_scale=7.5 , num_inference_steps=20 , generator=a , output_type="np" , )
SCREAMING_SNAKE_CASE : List[str] = output.images
SCREAMING_SNAKE_CASE : Optional[int] = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : Any = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
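# The obfuscated property above presumably builds ONNX Runtime session options
# with memory-pattern optimization disabled; written out plainly (an assumption
# about the hidden attribute name):
import onnxruntime as ort

options = ort.SessionOptions()
options.enable_mem_pattern = False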
| 25
| 0
|
"""simple docstring"""
from decimal import Decimal, getcontext
from math import ceil, factorial
def lowerCAmelCase ( __UpperCamelCase ):
'''simple docstring'''
if not isinstance(__UpperCamelCase , __UpperCamelCase ):
raise TypeError("""Undefined for non-integers""" )
elif precision < 1:
raise ValueError("""Undefined for non-natural numbers""" )
UpperCAmelCase__ : str = precision
UpperCAmelCase__ : List[Any] = ceil(precision / 14 )
UpperCAmelCase__ : List[str] = 426880 * Decimal(10005 ).sqrt()
UpperCAmelCase__ : List[Any] = 1
UpperCAmelCase__ : List[Any] = 13591409
UpperCAmelCase__ : Optional[int] = Decimal(__UpperCamelCase )
for k in range(1 , __UpperCamelCase ):
UpperCAmelCase__ : Any = factorial(6 * k ) // (factorial(3 * k ) * factorial(__UpperCamelCase ) ** 3)
linear_term += 545140134
exponential_term *= -262537412640768000
partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
return str(constant_term / partial_sum )[:-1]
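# For reference, the series implemented above is the Chudnovsky formula:
#   1/pi = 12 * sum_{k>=0} (-1)^k * (6k)! * (13591409 + 545140134*k)
#                          / ((3k)! * (k!)**3 * 640320**(3k + 3/2))
# The constant 426880 * sqrt(10005) equals 640320**(3/2) / 12, and each term
# contributes roughly 14 correct digits, hence the ceil(precision / 14) bound.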
if __name__ == "__main__":
__UpperCAmelCase = 50
print(F"The first {n} digits of pi is: {pi(n)}")
| 718
|
"""simple docstring"""
from __future__ import annotations
from typing import Any
class __lowercase :
def __init__( self : Optional[int] ,A : int ,A : int ,A : float = 0 ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : str = row, column
UpperCAmelCase__ : int = [[default_value for c in range(A )] for r in range(A )]
def __str__( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = f"Matrix consist of {self.row} rows and {self.column} columns\n"
# Make string identifier
UpperCAmelCase__ : Optional[Any] = 0
for row_vector in self.array:
for obj in row_vector:
UpperCAmelCase__ : Optional[int] = max(A ,len(str(A ) ) )
UpperCAmelCase__ : int = f"%{max_element_length}s"
# Make string and return
def single_line(A : list[float] ) -> str:
nonlocal string_format_identifier
UpperCAmelCase__ : List[str] = """["""
line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
line += "]"
return line
s += "\n".join(single_line(A ) for row_vector in self.array )
return s
def __repr__( self : Dict ):
'''simple docstring'''
return str(self )
def __lowercase ( self : int ,A : tuple[int, int] ):
'''simple docstring'''
if not (isinstance(A ,(list, tuple) ) and len(A ) == 2):
return False
elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
return False
else:
return True
def __getitem__( self : Optional[int] ,A : tuple[int, int] ):
'''simple docstring'''
assert self.validate_indicies(A )
return self.array[loc[0]][loc[1]]
def __setitem__( self : List[Any] ,A : tuple[int, int] ,A : float ):
'''simple docstring'''
assert self.validate_indicies(A )
UpperCAmelCase__ : str = value
def __add__( self : Any ,A : Matrix ):
'''simple docstring'''
assert isinstance(A ,A )
assert self.row == another.row and self.column == another.column
# Add
UpperCAmelCase__ : Optional[int] = Matrix(self.row ,self.column )
for r in range(self.row ):
for c in range(self.column ):
UpperCAmelCase__ : Union[str, Any] = self[r, c] + another[r, c]
return result
def __neg__( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = Matrix(self.row ,self.column )
for r in range(self.row ):
for c in range(self.column ):
UpperCAmelCase__ : int = -self[r, c]
return result
def __sub__( self : str ,A : Matrix ):
'''simple docstring'''
return self + (-another)
def __mul__( self : Union[str, Any] ,A : int | float | Matrix ):
'''simple docstring'''
if isinstance(A ,(int, float) ): # Scalar multiplication
UpperCAmelCase__ : Dict = Matrix(self.row ,self.column )
for r in range(self.row ):
for c in range(self.column ):
UpperCAmelCase__ : Any = self[r, c] * another
return result
elif isinstance(A ,A ): # Matrix multiplication
assert self.column == another.row
UpperCAmelCase__ : Optional[int] = Matrix(self.row ,another.column )
for r in range(self.row ):
for c in range(another.column ):
for i in range(self.column ):
result[r, c] += self[r, i] * another[i, c]
return result
else:
UpperCAmelCase__ : Union[str, Any] = f"Unsupported type given for another ({type(A )})"
raise TypeError(A )
def __lowercase ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = Matrix(self.column ,self.row )
for r in range(self.row ):
for c in range(self.column ):
UpperCAmelCase__ : Optional[int] = self[r, c]
return result
def __lowercase ( self : Any ,A : Matrix ,A : Matrix ):
'''simple docstring'''
assert isinstance(A ,A ) and isinstance(A ,A )
assert self.row == self.column == u.row == v.row # u, v should be column vector
assert u.column == v.column == 1 # u, v should be column vector
# Calculate
UpperCAmelCase__ : Optional[Any] = v.transpose()
UpperCAmelCase__ : Union[str, Any] = (v_t * self * u)[0, 0] + 1
if numerator_factor == 0:
return None # It's not invertible
return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
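# Note on the method above: it treats `self` as an already-computed A^(-1)
# and returns (A + u v^T)^(-1) via the Sherman-Morrison identity
#   (A + u v^T)^(-1) = A^(-1) - (A^(-1) u)(v^T A^(-1)) / (1 + v^T A^(-1) u),
# returning None when the denominator vanishes (the updated matrix is singular).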
# Testing
if __name__ == "__main__":
def lowerCAmelCase ( ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = Matrix(3 , 3 , 0 )
for i in range(3 ):
UpperCAmelCase__ : List[str] = 1
print(F"a^(-1) is {ainv}" )
# u, v
UpperCAmelCase__ : List[Any] = Matrix(3 , 1 , 0 )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = 1, 2, -3
UpperCAmelCase__ : List[str] = Matrix(3 , 1 , 0 )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Tuple = 4, -2, 5
print(F"u is {u}" )
print(F"v is {v}" )
print(F"uv^T is {u * v.transpose()}" )
# Sherman Morrison
print(F"(a + uv^T)^(-1) is {ainv.sherman_morrison(__UpperCamelCase , __UpperCamelCase )}" )
def lowerCAmelCase ( ):
'''simple docstring'''
import doctest
doctest.testmod()
testa()
| 194
| 0
|
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
A_: Dict = logging.get_logger(__name__)
def __lowerCAmelCase ( _A ,_A ,_A ):
"""simple docstring"""
return [
int(1_000 * (box[0] / width) ),
int(1_000 * (box[1] / height) ),
int(1_000 * (box[2] / width) ),
int(1_000 * (box[3] / height) ),
]
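# A tiny worked example of the 0-1000 normalization above, on a hypothetical
# 600x400 page (numbers chosen for illustration only):
width, height = 600, 400
box = [150, 100, 300, 200]        # (left, top, right, bottom) in pixels
normalized = [
    int(1000 * box[0] / width),   # 250
    int(1000 * box[1] / height),  # 250
    int(1000 * box[2] / width),   # 500
    int(1000 * box[3] / height),  # 500
]
assert normalized == [250, 250, 500, 500]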
def __lowerCAmelCase ( _A ,_A ,_A = None ):
"""simple docstring"""
_lowercase = tesseract_config if tesseract_config is not None else """"""
# apply OCR
_lowercase = to_pil_image(_A )
_lowercase , _lowercase = pil_image.size
_lowercase = pytesseract.image_to_data(_A ,lang=_A ,output_type="""dict""" ,config=_A )
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase = data["""text"""], data["""left"""], data["""top"""], data["""width"""], data["""height"""]
# filter empty words and corresponding coordinates
_lowercase = [idx for idx, word in enumerate(_A ) if not word.strip()]
_lowercase = [word for idx, word in enumerate(_A ) if idx not in irrelevant_indices]
_lowercase = [coord for idx, coord in enumerate(_A ) if idx not in irrelevant_indices]
_lowercase = [coord for idx, coord in enumerate(_A ) if idx not in irrelevant_indices]
_lowercase = [coord for idx, coord in enumerate(_A ) if idx not in irrelevant_indices]
_lowercase = [coord for idx, coord in enumerate(_A ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
_lowercase = []
for x, y, w, h in zip(_A ,_A ,_A ,_A ):
_lowercase = [x, y, x + w, y + h]
actual_boxes.append(_A )
# finally, normalize the bounding boxes
_lowercase = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(_A ,_A ,_A ) )
assert len(_A ) == len(_A ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
class _lowercase ( _UpperCAmelCase ):
"""simple docstring"""
lowerCAmelCase__ = ['pixel_values']
def __init__( self , UpperCAmelCase = True , UpperCAmelCase = None , UpperCAmelCase = PILImageResampling.BILINEAR , UpperCAmelCase = True , UpperCAmelCase = None , UpperCAmelCase = "" , **UpperCAmelCase , ):
'''simple docstring'''
super().__init__(**UpperCAmelCase )
_lowercase = size if size is not None else {"""height""": 224, """width""": 224}
_lowercase = get_size_dict(UpperCAmelCase )
_lowercase = do_resize
_lowercase = size
_lowercase = resample
_lowercase = apply_ocr
_lowercase = ocr_lang
_lowercase = tesseract_config
def _UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = PILImageResampling.BILINEAR , UpperCAmelCase = None , **UpperCAmelCase , ):
'''simple docstring'''
_lowercase = get_size_dict(UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(F'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
_lowercase = (size["""height"""], size["""width"""])
return resize(UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
def _UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = ChannelDimension.FIRST , **UpperCAmelCase , ):
'''simple docstring'''
_lowercase = do_resize if do_resize is not None else self.do_resize
_lowercase = size if size is not None else self.size
_lowercase = get_size_dict(UpperCAmelCase )
_lowercase = resample if resample is not None else self.resample
_lowercase = apply_ocr if apply_ocr is not None else self.apply_ocr
_lowercase = ocr_lang if ocr_lang is not None else self.ocr_lang
_lowercase = tesseract_config if tesseract_config is not None else self.tesseract_config
_lowercase = make_list_of_images(UpperCAmelCase )
if not valid_images(UpperCAmelCase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
# All transformations expect numpy arrays.
_lowercase = [to_numpy_array(UpperCAmelCase ) for image in images]
if apply_ocr:
requires_backends(self , """pytesseract""" )
_lowercase = []
_lowercase = []
for image in images:
_lowercase , _lowercase = apply_tesseract(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
words_batch.append(UpperCAmelCase )
boxes_batch.append(UpperCAmelCase )
if do_resize:
_lowercase = [self.resize(image=UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase ) for image in images]
# flip color channels from RGB to BGR (as Detectron2 requires this)
_lowercase = [flip_channel_order(UpperCAmelCase ) for image in images]
_lowercase = [to_channel_dimension_format(UpperCAmelCase , UpperCAmelCase ) for image in images]
_lowercase = BatchFeature(data={"""pixel_values""": images} , tensor_type=UpperCAmelCase )
if apply_ocr:
_lowercase = words_batch
_lowercase = boxes_batch
return data
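# Hedged usage sketch: the processor above matches the shape of transformers'
# LayoutLMv2ImageProcessor (an assumption based on the pytesseract +
# flip_channel_order pipeline), so typical usage would look like:
from PIL import Image
from transformers import LayoutLMv2ImageProcessor  # assumed real counterpart

processor = LayoutLMv2ImageProcessor(apply_ocr=True)
image = Image.open("invoice.png").convert("RGB")   # hypothetical input file
encoding = processor(image, return_tensors="pt")
print(encoding.pixel_values.shape)                 # e.g. torch.Size([1, 3, 224, 224])
print(encoding.words, encoding.boxes)              # OCR words and 0-1000 boxes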
| 398
|
import numpy as np
def __lowerCAmelCase ( _A ,_A ,_A = 1E-12 ,_A = 100 ,):
"""simple docstring"""
assert np.shape(_A )[0] == np.shape(_A )[1]
# Ensure proper dimensionality.
assert np.shape(_A )[0] == np.shape(_A )[0]
# Ensure inputs are either both complex or both real
assert np.iscomplexobj(_A ) == np.iscomplexobj(_A )
_lowercase = np.iscomplexobj(_A )
if is_complex:
# Ensure complex input_matrix is Hermitian
assert np.array_equal(_A ,input_matrix.conj().T )
# Set convergence to False. Will define convergence when we exceed max_iterations
# or when we have small changes from one iteration to next.
_lowercase = False
_lowercase = 0
_lowercase = 0
_lowercase = 1E12
while not convergence:
# Multiple matrix by the vector.
_lowercase = np.dot(_A ,_A )
# Normalize the resulting output vector.
_lowercase = w / np.linalg.norm(_A )
# Find rayleigh quotient
# (faster than usual b/c we know vector is normalized already)
_lowercase = vector.conj().T if is_complex else vector.T
_lowercase = np.dot(_A ,np.dot(_A ,_A ) )
# Check convergence.
_lowercase = np.abs(lambda_ - lambda_previous ) / lambda_
iterations += 1
if error <= error_tol or iterations >= max_iterations:
_lowercase = True
_lowercase = lambda_
if is_complex:
_lowercase = np.real(lambda_ )
return lambda_, vector
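# Self-contained numeric check of the idea above: a few power-iteration steps
# on a symmetric 2x2 matrix whose dominant eigenvalue is 3.
import numpy as np

a = np.array([[2.0, 1.0], [1.0, 2.0]])   # eigenvalues: 3 and 1
v = np.array([1.0, 0.0])
for _ in range(50):
    v = a @ v
    v /= np.linalg.norm(v)
rayleigh = v @ a @ v                      # Rayleigh quotient, converges to 3
assert abs(rayleigh - 3.0) < 1e-6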
def __lowerCAmelCase ( ):
"""simple docstring"""
_lowercase = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]] )
_lowercase = np.array([41, 4, 20] )
_lowercase = real_input_matrix.astype(np.complexaaa )
_lowercase = np.triu(1J * complex_input_matrix ,1 )
complex_input_matrix += imag_matrix
complex_input_matrix += -1 * imag_matrix.T
_lowercase = np.array([41, 4, 20] ).astype(np.complexaaa )
for problem_type in ["real", "complex"]:
if problem_type == "real":
_lowercase = real_input_matrix
_lowercase = real_vector
elif problem_type == "complex":
_lowercase = complex_input_matrix
_lowercase = complex_vector
# Our implementation.
_lowercase , _lowercase = power_iteration(_A ,_A )
# Numpy implementation.
# Get eigenvalues and eigenvectors using built-in numpy
# eigh (eigh used for symmetric or hermetian matrices).
_lowercase , _lowercase = np.linalg.eigh(_A )
# Last eigenvalue is the maximum one.
_lowercase = eigen_values[-1]
# Last column in this matrix is eigenvector corresponding to largest eigenvalue.
_lowercase = eigen_vectors[:, -1]
# Check our implementation and numpy gives close answers.
assert np.abs(eigen_value - eigen_value_max ) <= 1E-6
# Take absolute values element wise of each eigenvector.
# as they are only unique to a minus sign.
assert np.linalg.norm(np.abs(_A ) - np.abs(_A ) ) <= 1E-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
| 398
| 1
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
__UpperCAmelCase : Optional[int] = None
__UpperCAmelCase : int = logging.get_logger(__name__)
__UpperCAmelCase : List[Any] = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
__UpperCAmelCase : str = {
"vocab_file": {
"facebook/mbart-large-en-ro": (
"https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
),
"facebook/mbart-large-cc25": (
"https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
"facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
},
}
__UpperCAmelCase : Union[str, Any] = {
"facebook/mbart-large-en-ro": 1024,
"facebook/mbart-large-cc25": 1024,
}
# fmt: off
__UpperCAmelCase : Any = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class UpperCAmelCase_ ( _a):
'''simple docstring'''
__UpperCamelCase : List[str] = VOCAB_FILES_NAMES
__UpperCamelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase : Union[str, Any] = ["input_ids", "attention_mask"]
__UpperCamelCase : Any = MBartTokenizer
__UpperCamelCase : List[int] = []
__UpperCamelCase : List[int] = []
def __init__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="<unk>" , __SCREAMING_SNAKE_CASE="<pad>" , __SCREAMING_SNAKE_CASE="<mask>" , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else mask_token
super().__init__(
vocab_file=__SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , src_lang=__SCREAMING_SNAKE_CASE , tgt_lang=__SCREAMING_SNAKE_CASE , additional_special_tokens=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
UpperCamelCase : Dict = vocab_file
UpperCamelCase : List[str] = False if not self.vocab_file else True
UpperCamelCase : List[Any] = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} )
UpperCamelCase : List[Any] = {
lang_code: self.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
UpperCamelCase : Dict = src_lang if src_lang is not None else '''en_XX'''
UpperCamelCase : List[Any] = self.convert_tokens_to_ids(self._src_lang )
UpperCamelCase : str = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def _lowercase ( self ):
"""simple docstring"""
return self._src_lang
@src_lang.setter
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
UpperCamelCase : str = [self.sep_token_id]
UpperCamelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
UpperCamelCase : List[str] = src_lang
UpperCamelCase : Dict = self(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
UpperCamelCase : List[Any] = self.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Any = tgt_lang_id
return inputs
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = "en_XX" , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "ro_RO" , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = src_lang
UpperCamelCase : Optional[int] = tgt_lang
return super().prepare_seqaseq_batch(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def _lowercase ( self ):
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def _lowercase ( self ):
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Optional[int] = self.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Any = []
UpperCamelCase : Dict = [self.eos_token_id, self.cur_lang_code]
UpperCamelCase : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens )
UpperCamelCase : int = self.convert_ids_to_tokens(self.suffix_tokens )
UpperCamelCase : Tuple = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Optional[int] = self.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
UpperCamelCase : int = []
UpperCamelCase : Optional[Any] = [self.eos_token_id, self.cur_lang_code]
UpperCamelCase : Optional[Any] = self.convert_ids_to_tokens(self.prefix_tokens )
UpperCamelCase : List[str] = self.convert_ids_to_tokens(self.suffix_tokens )
UpperCamelCase : Optional[int] = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""" )
return
UpperCamelCase : Optional[int] = os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ):
copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
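# Hedged usage sketch, assuming the class above is transformers'
# MBartTokenizerFast:
from transformers import MBartTokenizerFast

tok = MBartTokenizerFast.from_pretrained(
    "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
)
ids = tok("UN Chief Says There Is No Military Solution in Syria")["input_ids"]
# Per set_src_lang_special_tokens above, the source language code follows </s>:
print(tok.convert_ids_to_tokens(ids)[-2:])  # ['</s>', 'en_XX']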
| 709
|
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
__UpperCAmelCase : Optional[int] = 500000
__UpperCAmelCase , __UpperCAmelCase : Any = os.path.split(__file__)
__UpperCAmelCase : int = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def a ( SCREAMING_SNAKE_CASE_ : datasets.Dataset , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase : Tuple = dataset.map(**SCREAMING_SNAKE_CASE_ )
@get_duration
def a ( SCREAMING_SNAKE_CASE_ : datasets.Dataset , **SCREAMING_SNAKE_CASE_ : Any ):
"""simple docstring"""
UpperCamelCase : int = dataset.filter(**SCREAMING_SNAKE_CASE_ )
def a ( ):
"""simple docstring"""
UpperCamelCase : Optional[int] = {'''num examples''': SPEED_TEST_N_EXAMPLES}
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase : Dict = datasets.Features({'''text''': datasets.Value('''string''' ), '''numbers''': datasets.Value('''float32''' )} )
UpperCamelCase : List[str] = generate_example_dataset(
os.path.join(SCREAMING_SNAKE_CASE_ , '''dataset.arrow''' ) , SCREAMING_SNAKE_CASE_ , num_examples=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = transformers.AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=SCREAMING_SNAKE_CASE_ )
def tokenize(SCREAMING_SNAKE_CASE_ : Dict ):
return tokenizer(examples['''text'''] )
UpperCamelCase : List[Any] = map(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = map(SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = map(SCREAMING_SNAKE_CASE_ , function=lambda SCREAMING_SNAKE_CASE_ : None , batched=SCREAMING_SNAKE_CASE_ )
with dataset.formatted_as(type='''numpy''' ):
UpperCamelCase : Tuple = map(SCREAMING_SNAKE_CASE_ , function=lambda SCREAMING_SNAKE_CASE_ : None , batched=SCREAMING_SNAKE_CASE_ )
with dataset.formatted_as(type='''pandas''' ):
UpperCamelCase : int = map(SCREAMING_SNAKE_CASE_ , function=lambda SCREAMING_SNAKE_CASE_ : None , batched=SCREAMING_SNAKE_CASE_ )
with dataset.formatted_as(type='''torch''' , columns='''numbers''' ):
UpperCamelCase : Dict = map(SCREAMING_SNAKE_CASE_ , function=lambda SCREAMING_SNAKE_CASE_ : None , batched=SCREAMING_SNAKE_CASE_ )
with dataset.formatted_as(type='''tensorflow''' , columns='''numbers''' ):
UpperCamelCase : Tuple = map(SCREAMING_SNAKE_CASE_ , function=lambda SCREAMING_SNAKE_CASE_ : None , batched=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = map(SCREAMING_SNAKE_CASE_ , function=SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Dict = filter(SCREAMING_SNAKE_CASE_ )
# Activate later when tokenizer support batched inputs
# with dataset.formatted_as(type='numpy'):
# times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
with open(SCREAMING_SNAKE_CASE_ , '''wb''' ) as f:
f.write(json.dumps(SCREAMING_SNAKE_CASE_ ).encode('''utf-8''' ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
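# The local `utils.get_duration` decorator is not shown here; a plausible
# minimal equivalent (an assumption, not the benchmark's actual helper):
import functools
import time

def get_duration(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = time.time()
        func(*args, **kwargs)
        return time.time() - start
    return wrapper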
| 643
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
lowercase_ = {"""configuration_fnet""": ["""FNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FNetConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["""FNetTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["""FNetTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_fnet"] = [
        "FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FNetForMaskedLM",
        "FNetForMultipleChoice",
        "FNetForNextSentencePrediction",
        "FNetForPreTraining",
        "FNetForQuestionAnswering",
        "FNetForSequenceClassification",
        "FNetForTokenClassification",
        "FNetLayer",
        "FNetModel",
        "FNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
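# Usage sketch (illustrative): with the `_LazyModule` pattern above, submodules are
# only imported on first attribute access, so the lines below trigger the actual
# import of `modeling_fnet` (and require torch to be installed):
#
#   from transformers import FNetConfig, FNetModel
#   model = FNetModel(FNetConfig())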
"""simple docstring"""
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}

RESOURCE_FILES_NAMES = {
    "sentencepiece_model_file": "sentencepiece.bpe.model",
    "vocab_file": "vocab.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
    },
    "sentencepiece_model_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ernie-m-base": 514,
    "ernie-m-large": 514,
}

PRETRAINED_INIT_CONFIGURATION = {
    "ernie-m-base": {"do_lower_case": False},
    "ernie-m-large": {"do_lower_case": False},
}
class ErnieMTokenizer(PreTrainedTokenizer):
    """Constructs an Ernie-M tokenizer, using the `sentencepiece` tools to cut words into sub-words."""

    # Ernie-M model doesn't have token_type embedding.
    model_input_names: List[str] = ["input_ids"]

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    resource_files_names = RESOURCE_FILES_NAMES

    # NOTE: assumed reconstruction -- `clean_text` and `get_offset_mapping` below rely
    # on this full-width -> half-width ASCII table (mirroring the paddlenlp tokenizer);
    # the original definition was missing from this copy.
    SP_CHAR_MAPPING = {chr(ch): chr(ch - 65248) for ch in range(65281, 65375)}

    def __init__(
        self,
        sentencepiece_model_ckpt,
        vocab_file=None,
        do_lower_case=False,
        encoding="utf8",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token,
            cls_token=cls_token, mask_token=mask_token, vocab_file=vocab_file, encoding=encoding,
            sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(sentencepiece_model_ckpt)
        # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
        if vocab_file is not None:
            self.vocab = self.load_vocab(filepath=vocab_file)
        else:
            self.vocab = {self.sp_model.id_to_piece(id): id for id in range(self.sp_model.get_piece_size())}
        self.reverse_vocab = {v: k for k, v in self.vocab.items()}
    def get_offset_mapping(self, text):
        if text is None:
            return None

        split_tokens = self.tokenize(text)
        normalized_text, char_mapping = "", []

        for i, ch in enumerate(text):
            if ch in self.SP_CHAR_MAPPING:
                ch = self.SP_CHAR_MAPPING.get(ch)
            else:
                ch = unicodedata.normalize("NFKC", ch)
            if self.is_whitespace(ch):
                continue
            normalized_text += ch
            char_mapping.extend([i] * len(ch))

        text, token_mapping, offset = normalized_text, [], 0

        if self.do_lower_case:
            text = text.lower()

        for token in split_tokens:
            if token[:1] == "▁":
                token = token[1:]
            start = text[offset:].index(token) + offset
            end = start + len(token)

            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1))
            offset = end
        return token_mapping
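    # Example sketch (hypothetical output): for a two-token input such as
    # "Hello world", get_offset_mapping returns one (start, end) character span per
    # sentencepiece token, e.g. [(0, 5), (6, 11)]; exact boundaries depend on the
    # loaded sentencepiece model.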
    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.sentencepiece_model_ckpt)

    def clean_text(self, text):
        """Performs invalid character removal and whitespace cleanup on text."""
        return "".join((self.SP_CHAR_MAPPING.get(c, c) for c in text))
    def _tokenize(self, text, enable_sampling=False, nbest_size=64, alpha=0.1):
        """Tokenize a string, optionally with sentencepiece subword regularization."""
        if self.sp_model_kwargs.get("enable_sampling") is True:
            enable_sampling = True
        if self.sp_model_kwargs.get("alpha") is not None:
            alpha = self.sp_model_kwargs.get("alpha")
        if self.sp_model_kwargs.get("nbest_size") is not None:
            nbest_size = self.sp_model_kwargs.get("nbest_size")

        if not enable_sampling:
            pieces = self.sp_model.EncodeAsPieces(text)
        else:
            pieces = self.sp_model.SampleEncodeAsPieces(text, nbest_size, alpha)
        new_pieces = []
        for pi, piece in enumerate(pieces):
            if piece == SPIECE_UNDERLINE:
                if not pieces[pi + 1].startswith(SPIECE_UNDERLINE) and pi != 0:
                    new_pieces.append(SPIECE_UNDERLINE)
                    continue
                else:
                    continue
            lst_i = 0
            for i, chunk in enumerate(piece):
                if chunk == SPIECE_UNDERLINE:
                    continue
                if self.is_ch_char(chunk) or self.is_punct(chunk):
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    new_pieces.append(chunk)
                    lst_i = i + 1
                elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
                elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
            if len(piece) > lst_i:
                new_pieces.append(piece[lst_i:])
        return new_pieces
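    # Design note: when `enable_sampling` is active (directly or via `sp_model_kwargs`),
    # SentencePiece samples among the `nbest_size` best segmentations with smoothing
    # parameter `alpha` (subword regularization), so tokenization becomes stochastic.
    # Sketch (`ckpt` is a placeholder path to a sentencepiece model):
    #
    #   tok = ErnieMTokenizer(ckpt, sp_model_kwargs={"enable_sampling": True, "nbest_size": 64, "alpha": 0.1})
    #   tok._tokenize("unpredictable")  # may differ between calls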
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) into a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def convert_ids_to_string(self, ids):
        """Converts a sequence of ids into a single string."""
        tokens = self.convert_ids_to_tokens(ids)
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.reverse_vocab.get(index, self.unk_token)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
        return _cls + token_ids_0 + _sep + _sep + token_ids_1 + _sep

    def build_offset_mapping_with_special_tokens(self, offset_mapping_0, offset_mapping_1=None):
        if offset_mapping_1 is None:
            return [(0, 0)] + offset_mapping_0 + [(0, 0)]

        return [(0, 0)] + offset_mapping_0 + [(0, 0), (0, 0)] + offset_mapping_1 + [(0, 0)]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            # [CLS] X [SEP]
            return (len(token_ids_0) + 2) * [0]

        # [CLS] A [SEP] [SEP] B [SEP]
        return [0] * (len(token_ids_0) + 1) + [1] * (len(token_ids_1) + 3)
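    # Layout sketch for a sequence pair (A, B) produced by the two methods above:
    #   input_ids:      [CLS]  A  [SEP] [SEP]  B  [SEP]
    #   token_type_ids:   0    0    1     1    1    1
    # i.e. [0] * (len(A) + 1) for [CLS] + A, then [1] * (len(B) + 3) for the rest.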
    def is_ch_char(self, char):
        """Checks whether `char` is a CJK unified ideograph."""
        if "\u4e00" <= char <= "\u9fff":
            return True
        return False

    def is_alpha(self, char):
        """Checks whether `char` is an ASCII letter."""
        if ("a" <= char <= "z") or ("A" <= char <= "Z"):
            return True
        return False

    def is_punct(self, char):
        """Checks whether `char` is a punctuation character handled by the tokenizer."""
        if char in ",;:.?!~,;:。?!《》【】":
            return True
        return False

    def is_whitespace(self, char):
        """Checks whether `char` is a whitespace character."""
        if char == " " or char == "\t" or char == "\n" or char == "\r":
            return True
        if len(char) == 1:
            cat = unicodedata.category(char)
            if cat == "Zs":
                return True
        return False
    def load_vocab(self, filepath):
        token_to_idx = {}
        with io.open(filepath, "r", encoding="utf-8") as f:
            for index, line in enumerate(f):
                token = line.rstrip("\n")
                token_to_idx[token] = int(index)

        return token_to_idx

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1

        tokenizer_model_file = os.path.join(save_directory, "sentencepiece.bpe.model")
        with open(tokenizer_model_file, "wb") as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model)

        return (vocab_file,)
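# Usage sketch (checkpoint name taken from PRETRAINED_VOCAB_FILES_MAP above; adjust as needed):
#
#   tokenizer = ErnieMTokenizer.from_pretrained("susnato/ernie-m-base_pytorch")
#   encoded = tokenizer("Hello world!")
#   print(encoded["input_ids"])  # ERNIE-M only returns input_ids (see model_input_names)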
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        """Dummy stand-in so this module still imports when vision deps are missing."""

        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class ObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = ObjectDetectionPipeline(model=model, image_processor=processor)
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]

    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png", threshold=0.0)

        self.assertGreater(len(outputs), 0)
        for detected_object in outputs:
            self.assertEqual(
                detected_object,
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                },
            )

        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")

        batch = [
            Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            # RGBA
            dataset[0]["file"],
            # LA
            dataset[1]["file"],
            # L
            dataset[2]["file"],
        ]
        batch_outputs = object_detector(batch, threshold=0.0)

        self.assertEqual(len(batch), len(batch_outputs))
        for outputs in batch_outputs:
            self.assertGreater(len(outputs), 0)
            for detected_object in outputs:
                self.assertEqual(
                    detected_object,
                    {
                        "score": ANY(float),
                        "label": ANY(str),
                        "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                    },
                )
    @require_tf
    @unittest.skip("Object detection not implemented in TF")
    def test_small_model_tf(self):
        pass

    @require_torch
    def test_small_model_pt(self):
        model_id = "hf-internal-testing/tiny-detr-mobilenetsv3"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.0)

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
            ],
        )

        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ],
            threshold=0.0,
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                ],
                [
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                ],
            ],
        )
    @require_torch
    @slow
    def test_large_model_pt(self):
        model_id = "facebook/detr-resnet-50"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )

        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ]
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
            ],
        )
    @require_torch
    @slow
    def test_integration_torch_object_detection(self):
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )

        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ]
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
            ],
        )
    @require_torch
    @slow
    def test_threshold(self):
        threshold = 0.9985
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=threshold)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )
    @require_torch
    @require_pytesseract
    @slow
    def test_layoutlm(self):
        model_id = "Narsil/layoutlmv3-finetuned-funsd"
        threshold = 0.9993

        object_detector = pipeline("object-detection", model=model_id, threshold=threshold)

        outputs = object_detector(
            "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png"
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
                {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
            ],
        )
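# Usage sketch outside the test harness (mirrors test_threshold above):
#
#   from transformers import pipeline
#   detector = pipeline("object-detection", model="facebook/detr-resnet-50")
#   print(detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.9))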
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)

_CITATION = '''\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",
author = "Moosavi, Nafise Sadat and
Strube, Michael",
booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = aug,
year = "2016",
address = "Berlin, Germany",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/P16-1060",
doi = "10.18653/v1/P16-1060",
pages = "632--642",
}
'''
_DESCRIPTION = '''\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements of the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only work with CoNLL line format:
The CoNLL format has one word per line with all the annotation for this word in column separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11 Named Entities These columns identifies the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
'''
_KWARGS_DESCRIPTION = '''
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
mentions whose corresponding coreference chain is of size one,
are considered as singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting \'keep_singletons=False\', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
\'mentions\': mentions
\'muc\': MUC metric [Vilain et al, 1995]
\'bcub\': B-cubed [Bagga and Baldwin, 1998]
\'ceafe\': CEAFe [Luo et al., 2005]
\'lea\': LEA [Moosavi and Strube, 2016]
\'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric(\'coval\')
>>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',
... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',
... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',
... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',
... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',
... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}
'''
def get_coref_infos(
    key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"
):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}

    doc_coref_infos = {}

    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0

    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num

    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)

    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num

    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)

    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters

        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters

    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)

    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)

    if remove_nested:
        logger.info(
            "Number of removed nested coreferring mentions in the key "
            f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}"
        )
        logger.info(
            "Number of resulting singleton clusters in the key "
            f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}"
        )

    if not keep_singletons:
        logger.info(
            f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
            "files, respectively"
        )

    return doc_coref_infos
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)

    output_scores = {}
    conll = 0
    conll_subparts_num = 0

    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": f1})

        logger.info(
            name.ljust(10),
            f"Recall: {recall * 100:.2f}",
            f" Precision: {precision * 100:.2f}",
            f" F1: {f1 * 100:.2f}",
        )

    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f"CoNLL score: {conll:.2f}")
        output_scores.update({"conll_score": conll})

    return output_scores
def check_gold_parse_annotation(key_lines):
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("#"):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Coval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Sequence(datasets.Value("string")),
                }
            ),
            codebase_urls=["https://github.com/ns-moosavi/coval"],
            reference_urls=[
                "https://github.com/ns-moosavi/coval",
                "https://www.aclweb.org/anthology/P16-1060",
                "http://www.conll.cemantix.org/2012/data.html",
            ],
        )

    def _compute(
        self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False
    ):
        metrics = [
            ("mentions", evaluator.mentions),
            ("muc", evaluator.muc),
            ("bcub", evaluator.b_cubed),
            ("ceafe", evaluator.ceafe),
            ("lea", evaluator.lea),
        ]

        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'.")
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"

        score = evaluate(
            key_lines=references,
            sys_lines=predictions,
            metrics=metrics,
            NP_only=NP_only,
            remove_nested=remove_nested,
            keep_singletons=keep_singletons,
            min_span=min_span,
        )

        return score
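# Usage sketch (mirrors the doctest in _KWARGS_DESCRIPTION): note that `references`
# are treated as the gold "key" files and `predictions` as the "system" files when
# `_compute` forwards them to `evaluate`.
#
#   coval = datasets.load_metric("coval")
#   results = coval.compute(predictions=[words], references=[words], keep_singletons=False)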
"""Build a minimal quantum circuit with a single measured qubit and report the observed state counts."""
import qiskit


def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    print(f"Total count for various states are: {single_qubit_measure(1, 1)}")
"""Benchmarking the library on inference and training in TensorFlow."""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_py3nvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_py3nvml_available():
    import py3nvml.py3nvml as nvml


logger = logging.get_logger(__name__)
def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`."
                )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func
def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int) -> "tf.Tensor":
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
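# Usage sketch: `run_with_tf_optimizations` is a decorator factory; the two flags
# pick between an eager forward pass and an (optionally XLA-)compiled tf.function:
#
#   @run_with_tf_optimizations(do_eager_mode=False, use_xla=True)
#   def forward():
#       return model(input_ids, training=False)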
class TensorFlowBenchmark(Benchmark):
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"

    @property
    def framework_version(self):
        return tf.__version__
    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        # initialize GPU on separate process
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)

    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)
    def _inference_memory(self, model_name: str, batch_size: int, sequence_length: int):
        # initialize GPU on separate process
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)

    def _train_memory(self, model_name: str, batch_size: int, sequence_length: int):
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)
    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(input_ids, training=False)

        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward

        return _inference
    def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.eager_mode is not False:
            raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.")

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train

        return _train
    def _measure_speed(self, func) -> float:
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional 10 times to stabilize compilation for tpu
                    logger.info("Do inference on TPU. Running model 5 times to stabilize compilation")
                    timeit.repeat(func, repeat=1, number=5)

                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                runtimes = timeit.repeat(
                    func,
                    repeat=self.args.repeat,
                    number=10,
                )

                return min(runtimes) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
    def _measure_memory(self, func: Callable[[], None]):
        logger.info(
            "Note that TensorFlow allocates more memory than "
            "it might need to speed up computation. "
            "The memory reported here corresponds to the memory "
            "reported by `nvidia-smi`, which can vary depending "
            "on total available memory on the GPU that is used."
        )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            "`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
                            " consumption line by line."
                        )
                    trace = start_memory_tracing("transformers")

                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
                        " with `args.memory=False`"
                    )
                elif self.args.is_gpu:
                    # gpu
                    if not is_py3nvml_available():
                        logger.warning(
                            "py3nvml not installed, we won't log GPU memory usage. "
                            "Install py3nvml (pip install py3nvml) to log information about GPU."
                        )
                        memory = "N/A"
                    else:
                        logger.info(
                            "Measuring total GPU usage on GPU device. Make sure to not have additional processes"
                            " running on the same GPU."
                        )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use)
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            "When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
                            " TensorFlow."
                        )
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func)
                        memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes

                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace)
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None

                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
                return "N/A", None
"""Testing suite for the PyTorch BEiT model."""
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester:
    def __init__(
        self,
        parent,
        vocab_size=100,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        out_indices=[0, 1, 2, 3],
    ):
        self.parent = parent
        self.vocab_size = 100
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.num_labels = num_labels
        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return BeitConfig(
            vocab_size=self.vocab_size,
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_indices=self.out_indices,
        )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = BeitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, pixel_values, labels, pixel_labels):
        model = BeitForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.type_sequence_label_size
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = BeitForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": BeitModel,
            "image-classification": BeitForImageClassification,
            "image-segmentation": BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="BEiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if (
                model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                # we skip lambda parameters as these require special initial values
                # determined by config.layer_scale_init_value
                if "lambda" in name:
                    continue
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @slow
    def test_model_from_pretrained(self):
        for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BeitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class BeitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_masked_image_modeling_head(self):
        model = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        # prepare bool_masked_pos
        bool_masked_pos = torch.ones((1, 196), dtype=torch.bool).to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 196, 8192))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))
    @slow
    def test_inference_image_classification_head(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.2385, -1.0987, -1.0108]).to(torch_device)

        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k").to(
            torch_device
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 21841))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor([1.6881, -0.2787, 0.5901]).to(torch_device)
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
    @slow
    def test_inference_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)
        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)
        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 150, 160, 160))
        self.assertEqual(logits.shape, expected_shape)
        is_pillow_less_than_9 = version.parse(PIL.__version__) < version.parse("9.0.0")
        if is_pillow_less_than_9:
            expected_slice = torch.tensor(
                [
                    [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
                    [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
                    [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
                ],
                device=torch_device,
            )
        else:
            expected_slice = torch.tensor(
                [
                    [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
                    [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
                    [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
                ],
                device=torch_device,
            )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
    @slow
    def test_post_processing_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)
        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)
        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((160, 160))
        self.assertEqual(segmentation[0].shape, expected_shape)
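

# --- Added illustration (not part of the original test file) ---
# A minimal sketch of the boolean-mask indexing used in the masked-image-modeling
# test above: indexing a (batch, patches, vocab) logits tensor with a
# (batch, patches) boolean mask flattens the selected positions into the first
# dimension. The shapes mirror the BEiT base checkpoint (196 patches, 8192
# visual tokens); the tensors here are random stand-ins.
import torch

logits = torch.randn(1, 196, 8192)
bool_masked_pos = torch.ones(1, 196, dtype=torch.bool)
masked_logits = logits[bool_masked_pos]  # -> shape (196, 8192)
assert masked_logits.shape == (196, 8192)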
| 216
|
'''simple docstring'''
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def tax_relpos_bias_lookup(params, i, prefix):
    """Returns the Rel position bias params of a layer. Does not transpose."""
    return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]
def tax_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v
def tax_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_1 = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]
    wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo
def tax_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]
def convert_tax_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool, scalable_attention: bool = False):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}
    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)
    new = collections.OrderedDict()
    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]
    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = tax_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = tax_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T
        # Block i, layer 1 (MLP).
        layer_norm = tax_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = tax_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(
                old, i, "encoder"
            ).T
    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]
    if not scalable_attention:
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(
            old, 0, "encoder"
        ).T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(
            old, 0, "decoder"
        ).T
    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = tax_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T
            # Block i, layer 1 (Cross Attention).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = tax_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T
            # Block i, layer 2 (MLP).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = tax_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T
            if scalable_attention:
                # convert the rel_embedding of each layer
                new[f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(
                    old, i, "decoder"
                ).T
        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]
        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T
    return new
def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepares a state dict for the PyTorch model."""
    # Make a state dict with torch tensors.
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])
    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]
    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]
    return state_dict
def load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only, scalable_attention):
    """Replaces the params in model with the T5X converted params."""
    variables = checkpoints.load_tax_checkpoint(tax_checkpoint_path)
    converted = convert_tax_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_tax_checkpoint_to_pytorch(
    tax_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False, scalable_attention: bool = False
):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    # Initialise PyTorch model
    config = MTaConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMTaEncoderModel(config)
    else:
        model = UMTaForConditionalGeneration(config)
    # Load weights from tf checkpoint
    load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only, scalable_attention)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Converts a native T5X checkpoint into a PyTorch checkpoint.')
# Required parameters
parser.add_argument(
'--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
        '--is_encoder_only', action='store_true', help='Whether the model is an encoder-only model.', default=False
)
parser.add_argument(
'--scalable_attention',
action='store_true',
help='Whether the model uses scaled attention (umt5 model)',
default=False,
)
    args = parser.parse_args()
    convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    )
)
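
# --- Added usage note (hypothetical paths; the flags match the argparse setup above) ---
#
#   python convert_t5x_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint_dir \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/output_dir \
#       --scalable_attention   # only for UMT5-style checkpoints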
| 216
| 1
|
"""simple docstring"""
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
logger = logging.get_logger(__name__)
def load_flax_checkpoint_in_pytorch_model(pt_model, model_file):
    """Load flax checkpoints in a PyTorch model"""
    try:
        with open(model_file, "rb") as flax_state_f:
            flax_state = from_bytes(None, flax_state_f.read())
    except UnpicklingError as e:
        try:
            with open(model_file) as f:
                if f.read().startswith("version"):
                    raise OSError(
                        "You seem to have cloned a repository without having git-lfs installed. Please"
                        " install git-lfs and run `git lfs install` followed by `git lfs pull` in the"
                        " folder you cloned."
                    )
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(f"Unable to convert {model_file} to Flax deserializable object. ")
    return load_flax_weights_in_pytorch_model(pt_model, flax_state)
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load flax checkpoints in a PyTorch model"""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise
    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )
    pt_model.base_model_prefix = ""
    flax_state_dict = flatten_dict(flax_state, sep=".")
    pt_model_dict = pt_model.state_dict()
    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())
    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split(".")
        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple_array[-1] == "kernel":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
        if "time_embedding" not in flax_key_tuple_array:
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array):
                flax_key_tuple_array[i] = (
                    flax_key_tuple_string.replace("_0", ".0")
                    .replace("_1", ".1")
                    .replace("_2", ".2")
                    .replace("_3", ".3")
                    .replace("_4", ".4")
                    .replace("_5", ".5")
                    .replace("_6", ".6")
                    .replace("_7", ".7")
                    .replace("_8", ".8")
                    .replace("_9", ".9")
                )
        flax_key = ".".join(flax_key_tuple_array)
        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)
    pt_model.load_state_dict(pt_model_dict)
    # re-transform missing_keys to list
    missing_keys = list(missing_keys)
    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )
    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference."
        )
    return pt_model
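
# --- Added usage sketch (assumed names: `MyModel` stands in for any torch.nn.Module
# whose state dict mirrors the Flax parameter tree; the .msgpack path is hypothetical) ---
#
#   pt_model = MyModel(config)
#   pt_model = load_flax_checkpoint_in_pytorch_model(pt_model, "/path/to/flax_model.msgpack")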
| 642
|
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
    }
}
class CpmTokenizer(PreTrainedTokenizer):
    def __init__(self, vocab_file, do_lower_case=False, remove_space=True, keep_accents=False, bos_token="<s>", eos_token="</s>", unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", additional_special_tokens=["<eop>", "<eod>"], sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
"You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
"See https://pypi.org/project/jieba/ for installation." )
        self.jieba = jieba
        self.translator = str.maketrans(" \n", "\u2582\u2583")
    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size(self):
        return len(self.sp_model)
    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
def __getstate__( self : Tuple ) -> List[str]:
'''simple docstring'''
a__ : Tuple = self.__dict__.copy()
a__ : Union[str, Any] = None
return state
def __setstate__( self : Tuple , a_ : int ) -> List[str]:
'''simple docstring'''
a__ : Any = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
a__ : str = {}
a__ : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCAmelCase ( self : List[Any] , a_ : Optional[Any] ) -> List[str]:
'''simple docstring'''
if self.remove_space:
a__ : Union[str, Any] = " ".join(inputs.strip().split() )
else:
a__ : Optional[Any] = inputs
a__ : List[str] = outputs.replace("``" , "\"" ).replace("''" , "\"" )
if not self.keep_accents:
a__ : Union[str, Any] = unicodedata.normalize("NFKD" , a_ )
a__ : Union[str, Any] = "".join([c for c in outputs if not unicodedata.combining(a_ )] )
if self.do_lower_case:
a__ : List[Any] = outputs.lower()
return outputs
def UpperCAmelCase ( self : Any , a_ : str ) -> List[str]:
'''simple docstring'''
a__ : Optional[Any] = self.preprocess_text(a_ )
a__ : Dict = self.sp_model.encode(a_ , out_type=a_ )
a__ : Optional[Any] = []
for piece in pieces:
if len(a_ ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
a__ : Optional[Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(a_ , "" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
a__ : List[str] = cur_pieces[1:]
else:
a__ : List[str] = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(a_ )
else:
new_pieces.append(a_ )
return new_pieces
def UpperCAmelCase ( self : int , a_ : Dict ) -> int:
'''simple docstring'''
return self.sp_model.PieceToId(a_ )
def UpperCAmelCase ( self : Dict , a_ : Tuple ) -> List[Any]:
'''simple docstring'''
return self.sp_model.IdToPiece(a_ )
def UpperCAmelCase ( self : Union[str, Any] , a_ : List[Any] ) -> str:
'''simple docstring'''
a__ : Optional[Any] = "".join(a_ ).replace(a_ , " " ).strip()
return out_string
def UpperCAmelCase ( self : str , a_ : List[int] , a_ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
a__ : List[Any] = [self.sep_token_id]
a__ : int = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def UpperCAmelCase ( self : int , a_ : List[int] , a_ : Optional[List[int]] = None , a_ : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a_ , token_ids_a=a_ , already_has_special_tokens=a_ )
if token_ids_a is not None:
return ([0] * len(a_ )) + [1] + ([0] * len(a_ )) + [1, 1]
return ([0] * len(a_ )) + [1, 1]
def UpperCAmelCase ( self : List[Any] , a_ : List[int] , a_ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
a__ : List[str] = [self.sep_token_id]
a__ : Tuple = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def UpperCAmelCase ( self : Dict , a_ : str , a_ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(a_ ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
a__ : Optional[int] = os.path.join(
a_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , a_ )
elif not os.path.isfile(self.vocab_file ):
with open(a_ , "wb" ) as fi:
a__ : int = self.sp_model.serialized_model_proto()
fi.write(a_ )
return (out_vocab_file,)
    def _decode(self, *args, **kwargs):
        text = super()._decode(*args, **kwargs)
        text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
        return text
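

# --- Added illustration (standalone, mirrors the translator built in __init__) ---
# Spaces and newlines are mapped to the placeholders \u2582/\u2583 before
# SentencePiece sees the text, and `_decode` maps them back afterwards.
translator = str.maketrans(" \n", "\u2582\u2583")
encoded = "hello world\n".translate(translator)  # 'hello\u2582world\u2583'
decoded = encoded.replace("\u2582", " ").replace("\u2583", "\n")
assert decoded == "hello world\n"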
| 642
| 1
|
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class RegNetModelTester:
    def __init__(self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10, hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1], is_training=True, use_labels=True, hidden_act="relu", num_labels=3, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = RegNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = RegNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class RegNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": RegNetModel, "image-classification": RegNetForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = RegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)
def _UpperCAmelCase ( self ) -> Tuple:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _UpperCAmelCase ( self ) -> int:
return
@unittest.skip(reason='''RegNet does not use inputs_embeds''' )
def _UpperCAmelCase ( self ) -> List[Any]:
pass
@unittest.skip(reason='''RegNet does not support input and output embeddings''' )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
pass
def _UpperCAmelCase ( self ) -> Optional[Any]:
a__ , a__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ = model_class(SCREAMING_SNAKE_CASE )
a__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a__ = [*signature.parameters.keys()]
a__ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( self ) -> str:
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE )
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1), msg=f"Parameter {name} of model {model_class} seems not properly initialized", )
                    self.assertTrue(
                        torch.all(module.bias == 0), msg=f"Parameter {name} of model {model_class} seems not properly initialized", )
def _UpperCAmelCase ( self ) -> str:
def check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
a__ = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
a__ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
a__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
a__ = self.model_tester.num_stages
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
a__ , a__ = self.model_tester.prepare_config_and_inputs_for_common()
a__ = ['''basic''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
a__ = layer_type
a__ = True
check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a__ = True
check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE )
@slow
def _UpperCAmelCase ( self ) -> Optional[int]:
for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ = RegNetModel.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class RegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_image_classification_head(self):
        model = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.4180, -1.5051, -3.4836]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
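

# --- Added illustration (not part of the test suite) ---
# The forward-signature check used in RegNetModelTest boils down to this
# `inspect` pattern; the `forward` stub here is a stand-in for a real model's
# forward method.
import inspect


def forward(pixel_values, labels=None):
    ...


arg_names = list(inspect.signature(forward).parameters.keys())
assert arg_names[:1] == ["pixel_values"]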
| 148
|
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    def __init__(self, parent, batch_size=12, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, hidden_size=32, projection_dim=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, max_position_embeddings=512, initializer_range=0.02, bos_token_id=0, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0
        config = self.get_config()
        return config, input_ids, tf.convert_to_tensor(input_mask)
    def get_config(self):
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class BlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
_lowercase : Optional[int] = False
_lowercase : Dict = False
_lowercase : str = False
    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)
def _UpperCAmelCase ( self ) -> Dict:
self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
def _UpperCAmelCase ( self ) -> Any:
pass
def _UpperCAmelCase ( self ) -> Union[str, Any]:
pass
@unittest.skip(reason='''Blip does not use inputs_embeds''' )
def _UpperCAmelCase ( self ) -> Optional[Any]:
pass
@unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
pass
@unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' )
def _UpperCAmelCase ( self ) -> List[Any]:
pass
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
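

# --- Added illustration (standalone numpy sketch of the mask shaping above) ---
# Each row of the attention mask keeps a random-length prefix of ones followed
# by zeros, so every sequence attends to a contiguous leading span.
import numpy as np

batch_size, seq_length = 2, 7
mask = np.ones((batch_size, seq_length), dtype=np.int64)
starts = np.random.randint(1, seq_length - 1, size=(batch_size,))
for batch_idx, start_index in enumerate(starts):
    mask[batch_idx, :start_index] = 1
    mask[batch_idx, start_index:] = 0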
| 148
| 1
|
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings("""ignore""", category=UserWarning, module="""torch.optim.lr_scheduler""")
class AcceleratedScheduler:
    """
    A wrapper around a learning rate scheduler that only steps when the optimizer(s) were actually stepped, so a
    skipped optimizer step (e.g. on mixed-precision overflow) does not desynchronize scheduler and optimizer.
    """

    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return
        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return
        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    # Passthroughs to the wrapped scheduler
    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
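

# --- Added usage note ---
# In practice the scheduler and optimizer reach this wrapper through
# `accelerator.prepare(...)`, which also wraps the optimizer (adding the
# `step_was_skipped` attribute checked in `step`). A hedged sketch:
#
#   scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10)
#   model, optimizer, scheduler = accelerator.prepare(model, optimizer, scheduler)
#   ...
#   scheduler.step()  # only really steps the wrapped scheduler once gradients are synced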
| 23
|
def solution(n: int = 100) -> int:
    """Returns the difference between the square of the sum and the sum of the
    squares of the first n natural numbers."""
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)
if __name__ == "__main__":
print(F'''{solution() = }''')
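    # Worked example for n = 10: 1^2 + ... + 10^2 = 385 and (1 + ... + 10)^2 = 55^2 = 3025,
    # so the difference is 3025 - 385 = 2640.
    assert solution(10) == 2640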
| 537
| 0
|
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
rename_keys_prefix = [
    ('bert.bert', 'visual_bert'),
    ('bert.cls', 'cls'),
    ('bert.classifier', 'cls'),
    ('token_type_embeddings_visual', 'visual_token_type_embeddings'),
    ('position_embeddings_visual', 'visual_position_embeddings'),
    ('projection', 'visual_projection'),
]
ACCEPTABLE_CHECKPOINTS = [
    'nlvr2_coco_pre_trained.th',
    'nlvr2_fine_tuned.th',
    'nlvr2_pre_trained.th',
    'vcr_coco_pre_train.th',
    'vcr_fine_tune.th',
    'vcr_pre_train.th',
    'vqa_coco_pre_trained.th',
    'vqa_fine_tuned.th',
    'vqa_pre_trained.th',
]
def load_state_dict(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location='cpu')
    return sd
def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    """Copy/paste/tweak the original checkpoint's weights to our VisualBERT structure."""
    assert (
        checkpoint_path.split('/')[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"""The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."""
    # Get Config
    if "pre" in checkpoint_path:
        model_type = 'pretraining'
        if "vcr" in checkpoint_path:
            config_params = {'visual_embedding_dim': 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {'visual_embedding_dim': 2048}
        elif "vqa" in checkpoint_path:
            config_params = {'visual_embedding_dim': 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {'visual_embedding_dim': 1024}
        else:
            raise NotImplementedError(f"""No implementation found for `{checkpoint_path}`.""")
    else:
        if "vcr" in checkpoint_path:
            config_params = {'visual_embedding_dim': 512}
            model_type = 'multichoice'
        elif "vqa_advanced" in checkpoint_path:
            config_params = {'visual_embedding_dim': 2048}
            model_type = 'vqa_advanced'
        elif "vqa" in checkpoint_path:
            config_params = {'visual_embedding_dim': 2048, 'num_labels': 3129}
            model_type = 'vqa'
        elif "nlvr" in checkpoint_path:
            config_params = {
                'visual_embedding_dim': 1024,
                'num_labels': 2,
            }
            model_type = 'nlvr'
    config = VisualBertConfig(**config_params)
    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)
    new_state_dict = get_new_dict(state_dict, config)
    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)
    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('orig_checkpoint_path', type=str, help='A path to .th on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
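    # --- Added usage note (hypothetical local paths; the checkpoint filename must be
    # one of ACCEPTABLE_CHECKPOINTS above) ---
    #
    #   python convert_visual_bert_checkpoint.py vqa_coco_pre_trained.th ./visualbert-vqa-coco-pre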
| 711
|
def mf_knapsack(i, wt, val, j):
    """Memory-function (memoized) knapsack: fills the global table `f` on demand."""
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            best = mf_knapsack(i - 1, wt, val, j)
        else:
            best = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = best
    return f[i][j]
def knapsack(w, wt, val, n):
    """Bottom-up 0/1 knapsack; returns (best value for capacity w, the dp table)."""
    dp = [[0] * (w + 1) for _ in range(n + 1)]
    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]
    return dp[n][w], dp
def knapsack_with_example_solution(w: int, wt: list, val: list):
    """Solves the knapsack problem and also returns one optimal subset of items."""
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            'Both the weights and values vectors must be either lists or tuples')
    num_items = len(wt)
    if num_items != len(val):
        msg = (
            'The number of weights must be the same as the number of values.\n'
            f"""But got {num_items} weights and {len(val)} values"""
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                'All weights must be integers but got weight of '
                f"""type {type(wt[i])} at index {i}"""
            )
            raise TypeError(msg)
    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)
    return optimal_val, example_optional_set
def _construct_solution(dp, wt, i, j, optimal_set):
    """Walks the dp table backwards, collecting the (1-indexed) items taken."""
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)
if __name__ == "__main__":
__A = [3, 2, 4, 4]
__A = [4, 3, 2, 3]
__A = 4
__A = 6
__A = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
__A , __A = knapsack(w, wt, val, n)
print(optimal_solution)
print(mf_knapsack(n, wt, val, w)) # switched the n and w
# testing the dynamic programming problem with example
# the optimal subset for the above example are items 3 and 4
__A , __A = knapsack_with_example_solution(w, wt, val)
assert optimal_solution == 8
assert optimal_subset == {3, 4}
print('optimal_value = ', optimal_solution)
print('An optimal subset corresponding to the optimal value', optimal_subset)
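    # Added sanity check (not in the original file): with capacity 5 and the same
    # wt/val lists, the best value is also 8, via items 3 and 4 (weights 2 + 3).
    assert knapsack(5, wt, val, n)[0] == 8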
| 62
| 0
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    TextToVideoSDPipeline,
    UNet3DConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
        [
            'num_inference_steps',
            'generator',
            'latents',
            'return_dict',
            'callback',
            'callback_steps',
        ] )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D'), up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D'), cross_attention_dim=32, attention_head_dim=4, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule='scaled_linear', clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=128, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, hidden_act='gelu', projection_dim=512, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'output_type': 'pt',
        }
        return inputs
    def test_text_to_video_default_case(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs['output_type'] = 'np'
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]
        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False, expected_max_diff=3e-3)
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=1e-2)
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def lowerCAmelCase_ ( self : Optional[Any] ):
pass
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def lowerCAmelCase_ ( self : Optional[int] ):
pass
@unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.' )
def lowerCAmelCase_ ( self : Optional[int] ):
pass
    def test_progress_bar(self):
        return super().test_progress_bar()
@slow
@skip_mps
class TextToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_full_model(self):
        expected_video = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy')
        pipe = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b')
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe = pipe.to('cuda')
        prompt = 'Spiderman is surfing'
        generator = torch.Generator(device='cpu').manual_seed(0)
        video_frames = pipe(prompt, generator=generator, num_inference_steps=25, output_type='pt').frames
        video = video_frames.cpu().numpy()
        assert np.abs(expected_video - video).mean() < 5e-2
    def test_two_step_model(self):
        expected_video = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy')
        pipe = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b')
        pipe = pipe.to('cuda')
        prompt = 'Spiderman is surfing'
        generator = torch.Generator(device='cpu').manual_seed(0)
        video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type='pt').frames
        video = video_frames.cpu().numpy()
        assert np.abs(expected_video - video).mean() < 5e-2
| 7
|
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
__A = TypeVar("T")
def get_parent_position(position: int) -> int:
    """heap helper: index of the parent of the node at `position`"""
    return (position - 1) // 2


def get_child_left_position(position: int) -> int:
    """heap helper: index of the left child of the node at `position`"""
    return (2 * position) + 1


def get_child_right_position(position: int) -> int:
    """heap helper: index of the right child of the node at `position`"""
    return (2 * position) + 2
class MinPriorityQueue(Generic[T]):
    """Minimum priority queue, keyed by element weight."""

    def __init__(self) -> None:
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0

    def __len__(self) -> int:
        return self.elements

    def __repr__(self) -> str:
        return str(self.heap)

    def is_empty(self) -> bool:
        # Check if the priority queue is empty
        return self.elements == 0

    def push(self, elem: T, weight: int) -> None:
        # Add an element with given priority to the queue
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self) -> T:
        # Remove and return the element with lowest weight (highest priority)
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem

    def update_key(self, elem: T, weight: int) -> None:
        # Update the weight of the given key
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)

    def _bubble_up(self, elem: T) -> None:
        # Place a node at the proper position (upward movement) [to be used internally
        # only]
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position, curr_pos)
            return self._bubble_up(elem)
        return None

    def _bubble_down(self, elem: T) -> None:
        # Place a node at the proper position (downward movement) [to be used
        # internally only]
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
            else:
                return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None

    def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None:
        # Swap the nodes at the given positions
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos
class GraphUndirectedWeighted(Generic[T]):
    """Undirected weighted graph: adjacency map of node -> {neighbour: weight}."""

    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0

    def __repr__(self) -> str:
        return str(self.connections)

    def __len__(self) -> int:
        return self.nodes

    def add_node(self, node: T) -> None:
        # Add a node in the graph if it is not in the graph
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # Add an edge between 2 nodes in the graph
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight
def prims_algo(
    graph: GraphUndirectedWeighted[T],
) -> tuple[dict[T, int], dict[T, T | None]]:
    """Run Prim's algorithm; returns the (distance map, parent map) of the MST."""
    # all nodes start infinitely far from the tree and without a parent
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}

    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)

    if priority_queue.is_empty():
        return dist, parent

    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node

    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
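# Illustrative driver (not in the original module): build a tiny triangle graph
# and run Prim's algorithm on it; assumes the `maxsize` and `MinPriorityQueue`
# definitions that precede this excerpt.
if __name__ == "__main__":
    g = GraphUndirectedWeighted[str]()
    g.add_edge("a", "b", 3)
    g.add_edge("b", "c", 10)
    g.add_edge("a", "c", 15)
    dist, parent = prims_algo(g)
    print(dist)    # cheapest edge weight connecting each node to the tree
    print(parent)  # MST predecessor of each node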
| 68
| 0
|
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = 256
# Modulus to hash a string
__SCREAMING_SNAKE_CASE : int = 1_000_003
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> bool:
snake_case_ = len(_SCREAMING_SNAKE_CASE )
snake_case_ = len(_SCREAMING_SNAKE_CASE )
if p_len > t_len:
return False
snake_case_ = 0
snake_case_ = 0
snake_case_ = 1
# Calculating the hash of pattern and substring of text
for i in range(_SCREAMING_SNAKE_CASE ):
snake_case_ = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
snake_case_ = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
snake_case_ = (modulus_power * alphabet_size) % modulus
for i in range(0 , t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
# Calculate the https://en.wikipedia.org/wiki/Rolling_hash
snake_case_ = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text_match = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text_no_match = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text_match) and not rabin_karp(pattern, text_no_match)
    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)
    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)
    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)
    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")
if __name__ == "__main__":
test_rabin_karp()
| 2
|
"""simple docstring"""
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class VQModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = VQModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 3,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass

    def test_from_pretrained_hub(self):
        model, loading_info = VQModel.from_pretrained("fusing/vqgan-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = VQModel.from_pretrained("fusing/vqgan-dummy")
        model.to(torch_device).eval()

        torch.manual_seed(0)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)

        image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143])
        # fmt: on
        self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
| 2
| 1
|
'''simple docstring'''
from typing import Any


class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self):
        self.head = None

    def print_list(self):
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    # adding nodes
    def push(self, new_data: Any):
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    # swapping nodes
    def swap_nodes(self, node_data_1: Any, node_data_2: Any):
        if node_data_1 == node_data_2:
            return

        node_1 = self.head
        while node_1 is not None and node_1.data != node_data_1:
            node_1 = node_1.next

        node_2 = self.head
        while node_2 is not None and node_2.data != node_data_2:
            node_2 = node_2.next

        if node_1 is None or node_2 is None:
            return

        # Swap the payloads; the links stay untouched
        node_1.data, node_2.data = node_2.data, node_1.data


if __name__ == "__main__":
    ll = LinkedList()
    for i in range(5, 0, -1):
        ll.push(i)

    ll.print_list()

    ll.swap_nodes(1, 4)
    print("After swapping")
    ll.print_list()
| 358
|
'''simple docstring'''
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 358
| 1
|
"""simple docstring"""
def average_absolute_deviation(nums: list) -> float:
    """
    Return the average absolute deviation of a list of numbers.

    >>> average_absolute_deviation([0])
    0.0
    >>> average_absolute_deviation([4, 1, 3, 2])
    1.0
    """
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("List is empty")

    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 616
|
"""simple docstring"""
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
    from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler
    from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
| 616
| 1
|
import warnings
from diffusers import StableDiffusionImg2ImgPipeline  # noqa F401
warnings.warn(
'The `image_to_image.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionImg2ImgPipeline` instead.'
)
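# A minimal sketch of the suggested replacement (illustration only; assumes a
# PIL image `init_image` is available and that the model id below is the one
# you actually want):
#
#   from diffusers import StableDiffusionImg2ImgPipeline
#
#   pipe = StableDiffusionImg2ImgPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
#   out = pipe(prompt="a fantasy landscape", image=init_image).images[0]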
| 322
|
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
logger = logging.get_logger(__name__)


class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
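# Migration sketch (illustration only): the deprecated class is a thin alias,
# so call sites only need the import swapped.
#
#   from transformers import CLIPImageProcessor  # instead of CLIPFeatureExtractor
#
#   image_processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")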
| 322
| 1
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize_and_center_crop=True,
        size=None,
        crop_pct=0.9,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 30}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "size": self.size,
            "do_resize_and_center_crop": self.do_resize_and_center_crop,
            "crop_pct": self.crop_pct,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }


@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PoolFormerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize_and_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "crop_pct"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 30})
        self.assertEqual(image_processor.crop_size, {"height": 30, "width": 30})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_shape = (
            1,
            self.image_processor_tester.num_channels,
            self.image_processor_tester.crop_size["height"],
            self.image_processor_tester.crop_size["width"],
        )
        self.assertEqual(encoded_images.shape, expected_shape)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_shape = (self.image_processor_tester.batch_size,) + expected_shape[1:]
        self.assertEqual(encoded_images.shape, expected_shape)

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_shape = (
            1,
            self.image_processor_tester.num_channels,
            self.image_processor_tester.crop_size["height"],
            self.image_processor_tester.crop_size["width"],
        )
        self.assertEqual(encoded_images.shape, expected_shape)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_shape = (self.image_processor_tester.batch_size,) + expected_shape[1:]
        self.assertEqual(encoded_images.shape, expected_shape)

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_shape = (
            1,
            self.image_processor_tester.num_channels,
            self.image_processor_tester.crop_size["height"],
            self.image_processor_tester.crop_size["width"],
        )
        self.assertEqual(encoded_images.shape, expected_shape)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_shape = (self.image_processor_tester.batch_size,) + expected_shape[1:]
        self.assertEqual(encoded_images.shape, expected_shape)
| 700
|
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
    """A small vector class on top of a plain Python list of components."""

    def __init__(self, components: Collection[float] | None = None) -> None:
        if components is None:
            components = []
        self.__components = list(components)

    def __len__(self) -> int:
        return len(self.__components)

    def __str__(self) -> str:
        return "(" + ",".join(map(str, self.__components)) + ")"

    def __add__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        else:
            raise Exception("must have the same size")

    def __sub__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        else:  # error case
            raise Exception("must have the same size")

    @overload
    def __mul__(self, other: float) -> Vector:
        ...

    @overload
    def __mul__(self, other: Vector) -> float:
        ...

    def __mul__(self, other: float | Vector) -> float | Vector:
        if isinstance(other, (float, int)):
            ans = [c * other for c in self.__components]
            return Vector(ans)
        elif isinstance(other, Vector) and len(self) == len(other):
            size = len(self)
            prods = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(prods)
        else:  # error case
            raise Exception("invalid operand!")

    def copy(self) -> Vector:
        return Vector(self.__components)

    def component(self, i: int) -> float:
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception("index out of range")

    def change_component(self, pos: int, value: float) -> None:
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value

    def euclidean_length(self) -> float:
        if len(self.__components) == 0:
            raise Exception("Vector is empty")
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))

    def angle(self, other: Vector, deg: bool = False) -> float:
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)


def zero_vector(dimension: int) -> Vector:
    assert isinstance(dimension, int)
    return Vector([0] * dimension)


def unit_basis_vector(dimension: int, pos: int) -> Vector:
    assert isinstance(dimension, int) and (isinstance(pos, int))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)


def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
    assert (
        isinstance(x, Vector)
        and isinstance(y, Vector)
        and (isinstance(scalar, (int, float)))
    )
    return x * scalar + y


def random_vector(n: int, a: int, b: int) -> Vector:
    random.seed(None)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)


class Matrix:
    """A simple matrix class with matrix-vector products and determinants."""

    def __init__(self, matrix: list[list[float]], w: int, h: int) -> None:
        self.__matrix = matrix
        self.__width = w
        self.__height = h

    def __str__(self) -> str:
        ans = ""
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans

    def __add__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrix must have the same dimension!")

    def __sub__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrices must have the same dimension!")

    @overload
    def __mul__(self, other: float) -> Matrix:
        ...

    @overload
    def __mul__(self, other: Vector) -> Vector:
        ...

    def __mul__(self, other: float | Vector) -> Vector | Matrix:
        if isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    prods = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(prods))
                return ans
            else:
                raise Exception(
                    "vector must have the same size as the "
                    "number of columns of the matrix!"
                )
        elif isinstance(other, (int, float)):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        return None

    def height(self) -> int:
        return self.__height

    def width(self) -> int:
        return self.__width

    def component(self, x: int, y: int) -> float:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception("change_component: indices out of bounds")

    def change_component(self, x: int, y: int, value: float) -> None:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("change_component: indices out of bounds")

    def minor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()

    def cofactor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception("Indices out of bounds")

    def determinant(self) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if self.__height < 1:
            raise Exception("Matrix has no element")
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactor_prods)


def square_zero_matrix(n: int) -> Matrix:
    ans: list[list[float]] = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)


def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    random.seed(None)
    matrix: list[list[float]] = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
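# Minimal usage sketch (not part of the original module): exercises the vector
# arithmetic and the cofactor-expansion determinant defined above.
if __name__ == "__main__":
    v = Vector([1, 2, 3])
    w = Vector([4, 5, 6])
    print(v + w)  # (5,7,9)
    print(v * w)  # dot product -> 32
    m = Matrix([[1, 2], [3, 4]], 2, 2)
    print(m.determinant())  # 1*4 - 2*3 -> -2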
| 495
| 0
|
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}


class EfficientNetConfig(PretrainedConfig):
    model_type = "efficientnet"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4


class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
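# Illustrative usage (not part of the original file): a default config derives
# its layer count from the block repeats.
#
#   config = EfficientNetConfig()
#   config.model_type          # "efficientnet"
#   config.num_hidden_layers   # sum([1, 2, 2, 3, 3, 4, 1]) * 4 == 64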
| 181
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711],
        do_convert_rgb=True,
    ):
        size = size if size is not None else {"height": 224, "width": 224}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
        }

    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"

        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8
                    )
                )
        else:
            image_inputs = []
            for i in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))

        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]

        return image_inputs


@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 224, "width": 224})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_shape = (
            1,
            self.image_processor_tester.num_channels,
            self.image_processor_tester.crop_size["height"],
            self.image_processor_tester.crop_size["width"],
        )
        self.assertEqual(encoded_images.shape, expected_shape)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_shape = (self.image_processor_tester.batch_size,) + expected_shape[1:]
        self.assertEqual(encoded_images.shape, expected_shape)

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_shape = (
            1,
            self.image_processor_tester.num_channels,
            self.image_processor_tester.crop_size["height"],
            self.image_processor_tester.crop_size["width"],
        )
        self.assertEqual(encoded_images.shape, expected_shape)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_shape = (self.image_processor_tester.batch_size,) + expected_shape[1:]
        self.assertEqual(encoded_images.shape, expected_shape)

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_shape = (
            1,
            self.image_processor_tester.num_channels,
            self.image_processor_tester.crop_size["height"],
            self.image_processor_tester.crop_size["width"],
        )
        self.assertEqual(encoded_images.shape, expected_shape)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_shape = (self.image_processor_tester.batch_size,) + expected_shape[1:]
        self.assertEqual(encoded_images.shape, expected_shape)


@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_batch_feature(self):
        pass

    def test_call_pil_four_channels(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images (RGBA is converted down to three channels)
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_shape = (
            1,
            self.expected_encoded_image_num_channels,
            self.image_processor_tester.crop_size["height"],
            self.image_processor_tester.crop_size["width"],
        )
        self.assertEqual(encoded_images.shape, expected_shape)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_shape = (self.image_processor_tester.batch_size,) + expected_shape[1:]
        self.assertEqual(encoded_images.shape, expected_shape)
| 181
| 1
|
def _print_dist(dist, v):
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("inf"):
                print(int(dist[i][j]), end="\t")
            else:
                print("INF", end="\t")
        print()


def floyd_warshall(graph, v):
    """
    :param graph: 2D array of edge weights (float("inf") where no edge exists)
    :param v: number of vertices
    :return: (dist, v) where dist[i][j] is the shortest distance from i to j
    """
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]

    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]

    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]

    _print_dist(dist, v)
    return dist, v


if __name__ == "__main__":
    v = int(input("Enter number of vertices: "))
    e = int(input("Enter number of edges: "))

    graph = [[float("inf") for i in range(v)] for j in range(v)]

    for i in range(v):
        graph[i][i] = 0.0

    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:"))
        dst = int(input("Enter destination:"))
        weight = float(input("Enter weight:"))
        graph[src][dst] = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
| 705
|
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)


@dataclass
class BasicExample:
    foo: int
    bar: float
    baz: str
    flag: bool


@dataclass
class WithDefaultExample:
    foo: int = 42
    baz: str = field(default="toto", metadata={"help": "help message"})


@dataclass
class WithDefaultBoolExample:
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None


class BasicEnum(Enum):
    titi = "titi"
    toto = "toto"


class MixedTypeEnum(Enum):
    titi = "titi"
    toto = "toto"
    fourtytwo = 42


@dataclass
class EnumExample:
    foo: BasicEnum = "toto"

    def __post_init__(self):
        self.foo = BasicEnum(self.foo)


@dataclass
class MixedTypeEnumExample:
    foo: MixedTypeEnum = "toto"

    def __post_init__(self):
        self.foo = MixedTypeEnum(self.foo)


@dataclass
class OptionalExample:
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={"help": "help message"})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])


@dataclass
class ListExample:
    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=["Hallo", "Bonjour", "Hello"])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])


@dataclass
class RequiredExample:
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()

    def __post_init__(self):
        self.required_enum = BasicEnum(self.required_enum)


@dataclass
class StringLiteralAnnotationExample:
    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default="toto", metadata={"help": "help message"})
    foo_str: "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"])


if is_python_no_less_than_3_10:

    @dataclass
    class WithDefaultBoolExamplePep604:
        foo: bool = False
        baz: bool = True
        opt: bool | None = None

    @dataclass
    class OptionalExamplePep604:
        foo: int | None = None
        bar: float | None = field(default=None, metadata={"help": "help message"})
        baz: str | None = None
        ces: list[str] | None = list_field(default=[])
        des: list[int] | None = list_field(default=[])


class HfArgumentParserTest(unittest.TestCase):
    def argparsersEqual(self, a: argparse.ArgumentParser, b: argparse.ArgumentParser):
        """Small helper to check pseudo-equality of two `ArgumentParser` instances."""
        self.assertEqual(len(a._actions), len(b._actions))
        for x, y in zip(a._actions, b._actions):
            xx = {k: v for k, v in vars(x).items() if k != "container"}
            yy = {k: v for k, v in vars(y).items() if k != "container"}

            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get("choices", None) and yy.get("choices", None):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx["type"](expected_choice), yy["type"](expected_choice))
                del xx["type"], yy["type"]

            self.assertEqual(xx, yy)

    def test_basic(self):
        parser = HfArgumentParser(BasicExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument("--bar", type=float, required=True)
        expected.add_argument("--baz", type=str, required=True)
        expected.add_argument("--flag", type=string_to_bool, default=False, const=True, nargs="?")
        self.argparsersEqual(parser, expected)

        args = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
        (example,) = parser.parse_args_into_dataclasses(args, look_for_args_file=False)
        self.assertFalse(example.flag)

    def test_with_default(self):
        parser = HfArgumentParser(WithDefaultExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=42, type=int)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        self.argparsersEqual(parser, expected)

    def test_with_default_bool(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=string_to_bool, default=False, const=True, nargs="?")
        expected.add_argument("--baz", type=string_to_bool, default=True, const=True, nargs="?")
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument("--no_baz", action="store_false", default=False, dest="baz")
        expected.add_argument("--opt", type=string_to_bool, default=None)

        dataclass_types = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(WithDefaultBoolExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=False, baz=True, opt=None))

            args = parser.parse_args(["--foo", "--no_baz"])
            self.assertEqual(args, Namespace(foo=True, baz=False, opt=None))

            args = parser.parse_args(["--foo", "--baz"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=None))

            args = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=True))

            args = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"])
            self.assertEqual(args, Namespace(foo=False, baz=False, opt=False))

    def test_with_enum(self):
        parser = HfArgumentParser(MixedTypeEnumExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=["titi", "toto", 42],
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")
        enum_ex = parser.parse_args_into_dataclasses([])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.toto)

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "titi"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.titi)

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "42"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.fourtytwo)

    def test_with_literal(self):
        @dataclass
        class LiteralExample:
            foo: Literal["titi", "toto", 42] = "toto"

        parser = HfArgumentParser(LiteralExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=("titi", "toto", 42),
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)

    def test_with_list(self):
        parser = HfArgumentParser(ListExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo_int", nargs="+", default=[], type=int)
        expected.add_argument("--bar_int", nargs="+", default=[1, 2, 3], type=int)
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        expected.add_argument("--foo_float", nargs="+", default=[0.1, 0.2, 0.3], type=float)

        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(
            args,
            Namespace(foo_int=[], bar_int=[1, 2, 3], foo_str=["Hallo", "Bonjour", "Hello"], foo_float=[0.1, 0.2, 0.3]),
        )

        args = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split())
        self.assertEqual(args, Namespace(foo_int=[1], bar_int=[2, 3], foo_str=["a", "b", "c"], foo_float=[0.1, 0.7]))

    def test_with_optional(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=None, type=int)
        expected.add_argument("--bar", default=None, type=float, help="help message")
        expected.add_argument("--baz", default=None, type=str)
        expected.add_argument("--ces", nargs="+", default=[], type=str)
        expected.add_argument("--des", nargs="+", default=[], type=int)

        dataclass_types = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(OptionalExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)

            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=None, bar=None, baz=None, ces=[], des=[]))

            args = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split())
            self.assertEqual(args, Namespace(foo=12, bar=3.14, baz="42", ces=["a", "b", "c"], des=[1, 2, 3]))

    def test_with_required(self):
        parser = HfArgumentParser(RequiredExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--required_list", nargs="+", type=int, required=True)
        expected.add_argument("--required_str", type=str, required=True)
        expected.add_argument(
            "--required_enum",
            type=make_choice_type_function(["titi", "toto"]),
            choices=["titi", "toto"],
            required=True,
        )
        self.argparsersEqual(parser, expected)

    def test_with_string_literal_annotation(self):
        parser = HfArgumentParser(StringLiteralAnnotationExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument(
            "--required_enum",
            type=make_choice_type_function(["titi", "toto"]),
            choices=["titi", "toto"],
            required=True,
        )
        expected.add_argument("--opt", type=string_to_bool, default=None)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        self.argparsersEqual(parser, expected)

    def test_parse_dict(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }

        parsed_args = parser.parse_dict(args_dict)[0]
        args = BasicExample(**args_dict)
        self.assertEqual(parsed_args, args)

    def test_parse_dict_extra_key(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
            "extra": 42,
        }

        self.assertRaises(ValueError, parser.parse_dict, args_dict, allow_extra_keys=False)

    def test_parse_json(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_json = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_json")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".json", "w+") as f:
                json.dump(args_dict_for_json, f)
            # YAML is a superset of JSON, so the YAML loader reads the JSON file as well
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".json"))[0]

        args = BasicExample(**args_dict_for_json)
        self.assertEqual(parsed_args, args)

    def test_parse_yaml(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_yaml = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_yaml")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".yaml", "w+") as f:
                yaml.dump(args_dict_for_yaml, f)
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".yaml"))[0]
        args = BasicExample(**args_dict_for_yaml)
        self.assertEqual(parsed_args, args)

    def test_integration_training_args(self):
        parser = HfArgumentParser(TrainingArguments)
        self.assertIsNotNone(parser)
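# Stand-alone usage sketch of the parser under test (illustration only):
#
#   parser = HfArgumentParser(BasicExample)
#   (example,) = parser.parse_args_into_dataclasses(
#       ["--foo", "1", "--bar", "0.5", "--baz", "quux"], look_for_args_file=False
#   )
#   assert example.foo == 1 and example.baz == "quux"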
| 590
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_sew"] = [
'SEW_PRETRAINED_MODEL_ARCHIVE_LIST',
'SEWForCTC',
'SEWForSequenceClassification',
'SEWModel',
'SEWPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
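# Illustration (not part of the original file): with the lazy module in place,
# the heavyweight torch submodule is only imported on first attribute access:
#
#   from transformers.models.sew import SEWConfig  # resolves lazily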
| 371
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    """Build the list of (old, new) key pairs used to rename the DINO state dict."""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    """Split each fused timm qkv projection into separate query/key/value tensors."""
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    """Drop the linear classification head from the original checkpoint in place."""
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    """Download the standard COCO test image used to verify the conversion."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    """Copy, rename, and verify the DINO checkpoint's weights in our ViT structure."""
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
A : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='dino_vitb16',
type=str,
help='Name of the model trained with DINO you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--base_model',
action='store_true',
help='Whether to only convert the base model (no projection head weights).',
)
parser.set_defaults(base_model=True)
A : Any = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
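# Hypothetical invocation (script name and paths are placeholders):
#
#     python convert_dino_to_vit.py --model_name dino_vits8 \
#         --pytorch_dump_folder_path ./dino-vits8-converted
#
# Note that `parser.set_defaults(base_model=True)` forces `base_model` to True
# whether or not the flag is passed, so only the backbone (without the DINO
# projection head) is converted unless that default is changed.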
| 371
| 1
|
'''simple docstring'''
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big(n, prec=1000):
    """Miller-Rabin probabilistic primality test with `prec` random rounds."""
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # this means n is odd: write n - 1 = d * (2**exp) with d odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2
        exp += 1
    # n - 1 = d * (2**exp)
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True
if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
    print("Here's the list of primes:")
    print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 710
|
'''simple docstring'''
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = [
"python",
"tqdm",
"regex",
"requests",
"packaging",
"filelock",
"numpy",
"tokenizers",
"huggingface-hub",
"safetensors",
"accelerate",
"pyyaml",
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
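# Example usage (a sketch): callers pass a key from `deps`, optionally with an
# installation hint forwarded to `require_version`:
#
#     dep_version_check("tqdm")
#     dep_version_check("datasets", hint="pip install datasets")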
| 13
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : Optional[int] = logging.get_logger(__name__)
lowerCamelCase__ : Optional[int] = {
"unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}
class LxmertConfig(PretrainedConfig):
    """Configuration class to store the configuration of a LXMERT model."""

    model_type = "lxmert"
    attribute_map = {}

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9500,
        num_object_labels=1600,
        num_attr_labels=400,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
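# A short usage sketch (defaults shown above, for illustration):
#
#     config = LxmertConfig()
#     config.num_hidden_layers  # {"vision": 5, "cross_encoder": 5, "language": 9}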
| 698
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : Tuple = logging.get_logger(__name__)
lowerCamelCase__ : List[Any] = {
"uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}
class MraConfig(PretrainedConfig):
    """Configuration class to store the configuration of an MRA model."""

    model_type = "mra"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        position_embedding_type="absolute",
        block_per_row=4,
        approx_mode="full",
        initial_prior_first_n_blocks=0,
        initial_prior_diagonal_n_blocks=0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
| 698
| 1
|
"""simple docstring"""
from PIL import Image
def change_contrast(img: Image, level: int) -> Image:
    """Change the contrast of a PIL image by `level` (roughly -255 .. 255)."""
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        return int(128 + factor * (c - 128))

    return img.point(contrast)
if __name__ == "__main__":
# Load image
with Image.open("image_data/lena.jpg") as img:
# Change contrast to 170
        cont_img = change_contrast(img, 170)
cont_img.save("image_data/lena_high_contrast.png", format="png")
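        # Worked example (for illustration): at level = 170 the factor computed in
        # change_contrast is (259 * (170 + 255)) / (255 * (259 - 170))
        # = 110075 / 22695 ≈ 4.85, so mid-gray (128) stays fixed while values
        # away from 128 are pushed toward 0 or 255.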
| 702
|
"""simple docstring"""
_lowercase : Optional[Any] = {
"Pillow": "Pillow<10.0.0",
"accelerate": "accelerate>=0.20.3",
"av": "av==9.2.0",
"beautifulsoup4": "beautifulsoup4",
"black": "black~=23.1",
"codecarbon": "codecarbon==1.2.0",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"decord": "decord==0.6.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"fairscale": "fairscale>0.3",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.14.1,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2,<=0.4.13",
"jaxlib": "jaxlib>=0.1.65,<=0.4.13",
"jieba": "jieba",
"kenlm": "kenlm",
"keras-nlp": "keras-nlp>=0.3.1",
"librosa": "librosa",
"nltk": "nltk",
"natten": "natten>=0.14.6",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic<2",
"pytest": "pytest>=7.2.0",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"python": "python>=3.8.0",
"ray[tune]": "ray[tune]",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff>=0.0.241,<=0.0.259",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.3.1",
"sagemaker": "sagemaker>=2.31.0",
"scikit-learn": "scikit-learn",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14",
"tensorflow": "tensorflow>=2.6,<2.14",
"tensorflow-text": "tensorflow-text<2.14",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"timm": "timm",
"tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14",
"torch": "torch>=1.9,!=1.12.0",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
}
| 625
| 0
|
from itertools import count
def solution(min_block_length: int = 50) -> int:
    """Project Euler 115: smallest row length n whose fill count exceeds one million."""
    fill_count_functions = [1] * min_block_length
    for n in count(min_block_length):
        fill_count_functions.append(1)
        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]
            fill_count_functions[n] += 1
        if fill_count_functions[n] > 1000000:
            break
    return n
if __name__ == "__main__":
print(F'''{solution() = }''')
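# Reading of the recurrence above (interpretation, not additional code):
# fill_count_functions[n] counts the fills of a length-n row with red blocks of
# at least min_block_length units separated by at least one black square; each
# (block_length, block_start) placement contributes the count of the remaining
# suffix, the `+= 1` counts a block flush against the right edge, and the
# initial append(1) counts the all-black row.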
| 157
|
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
'''EleutherAI/gpt-neo-1.3B''': '''https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json''',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class GPTNeoConfig(PretrainedConfig):
    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=50257,
        max_position_embeddings=2048,
        hidden_size=2048,
        num_layers=24,
        attention_types=[[["global", "local"], 12]],
        num_heads=16,
        intermediate_size=None,
        window_size=256,
        activation_function="gelu_new",
        resid_dropout=0.0,
        embed_dropout=0.0,
        attention_dropout=0.0,
        classifier_dropout=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)

        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
                f"`config.num_layers = {self.num_layers}`. "
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument."
            )

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @staticmethod
    def expand_attention_types_params(attention_types):
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions
def custom_unfold(input, dimension, size, step):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]

    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]

    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]

    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))
    return sliced.permute(perm)
def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """Custom implementation of the block-length computation to enable the export to ONNX."""
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")
class GPTNeoOnnxConfig(OnnxConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
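# A minimal sketch of the sliding-window slicing `custom_unfold` reimplements
# for ONNX export (it is intended to mirror `torch.Tensor.unfold`):
#
#     import torch
#     x = torch.arange(10)
#     x.unfold(dimension=0, size=4, step=4)
#     # tensor([[0, 1, 2, 3],
#     #         [4, 5, 6, 7]])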
| 157
| 1
|
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)

    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)

    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1 - min(µA(x)))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x) + µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = min[0, (µA(x) - µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title("Young")
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title("Middle aged")
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title("union")
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title("intersection")
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title("complement_a")
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title("difference a/b")
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title("alg_sum")
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title("alg_product")
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title("bdd_sum")
plt.grid(True)
plt.subplot(4, 3, 1_0)
plt.plot(X, bdd_difference)
plt.title("bdd_difference")
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 701
|
def hamming_distance(string1: str, string2: str) -> int:
    """Count the positions at which two equal-length strings differ."""
    if len(string1) != len(string2):
        raise ValueError("String lengths must match!")
    count = 0
    for char1, char2 in zip(string1, string2):
        if char1 != char2:
            count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
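    # Example (for illustration): "karolin" and "kathrin" differ at the three
    # middle positions (r/t, o/h, l/r).
    assert hamming_distance("karolin", "kathrin") == 3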
| 259
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
"tokenization_xlm": ["XLMTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
"XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMForMultipleChoice",
"XLMForQuestionAnswering",
"XLMForQuestionAnsweringSimple",
"XLMForSequenceClassification",
"XLMForTokenClassification",
"XLMModel",
"XLMPreTrainedModel",
"XLMWithLMHeadModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
"TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLMForMultipleChoice",
"TFXLMForQuestionAnsweringSimple",
"TFXLMForSequenceClassification",
"TFXLMForTokenClassification",
"TFXLMMainLayer",
"TFXLMModel",
"TFXLMPreTrainedModel",
"TFXLMWithLMHeadModel",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 308
|
"""simple docstring"""
def nor_gate(input_1: int, input_2: int) -> int:
    return int(input_1 == input_2 == 0)


def main() -> None:
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(f"| 0 | 0 | {nor_gate(0, 0)} |")
    print(f"| 0 | 1 | {nor_gate(0, 1)} |")
    print(f"| 1 | 0 | {nor_gate(1, 0)} |")
    print(f"| 1 | 1 | {nor_gate(1, 1)} |")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 308
| 1
|
"""simple docstring"""
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class AudioDiffusionPipeline(DiffusionPipeline):
    _optional_components = ["vqvae"]
    def __init__(
        self,
        vqvae: AutoencoderKL,
        unet: UNet2DConditionModel,
        mel: Mel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
    ):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae)

    def get_default_steps(self) -> int:
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1000
@torch.no_grad()
def __call__( self : List[str] , snake_case__ : int = 1 , snake_case__ : str = None , snake_case__ : np.ndarray = None , snake_case__ : int = 0 , snake_case__ : int = 0 , snake_case__ : int = None , snake_case__ : torch.Generator = None , snake_case__ : float = 0 , snake_case__ : float = 0 , snake_case__ : torch.Generator = None , snake_case__ : float = 0 , snake_case__ : torch.Tensor = None , snake_case__ : torch.Tensor = None , snake_case__ : Any=True , ):
lowerCamelCase_ : Dict =steps or self.get_default_steps()
self.scheduler.set_timesteps(snake_case__ )
lowerCamelCase_ : Optional[Any] =step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
            self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
lowerCamelCase_ : Any =randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=snake_case__ , device=self.device , )
lowerCamelCase_ : Union[str, Any] =noise
lowerCamelCase_ : Optional[Any] =None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(snake_case__ , snake_case__ )
lowerCamelCase_ : Optional[Any] =self.mel.audio_slice_to_image(snake_case__ )
lowerCamelCase_ : Any =np.frombuffer(input_image.tobytes() , dtype="uint8" ).reshape(
(input_image.height, input_image.width) )
lowerCamelCase_ : Tuple =(input_image / 255) * 2 - 1
lowerCamelCase_ : str =torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
lowerCamelCase_ : str =self.vqvae.encode(torch.unsqueeze(snake_case__ , 0 ) ).latent_dist.sample(
generator=snake_case__ )[0]
lowerCamelCase_ : Any =self.vqvae.config.scaling_factor * input_images
if start_step > 0:
lowerCamelCase_ : Any =self.scheduler.add_noise(snake_case__ , snake_case__ , self.scheduler.timesteps[start_step - 1] )
lowerCamelCase_ : Tuple =(
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
lowerCamelCase_ : List[str] =int(mask_start_secs * pixels_per_second )
lowerCamelCase_ : Union[str, Any] =int(mask_end_secs * pixels_per_second )
lowerCamelCase_ : Any =self.scheduler.add_noise(snake_case__ , snake_case__ , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
            if isinstance(self.unet, UNet2DConditionModel):
lowerCamelCase_ : str =self.unet(snake_case__ , snake_case__ , snake_case__ )["sample"]
else:
lowerCamelCase_ : Tuple =self.unet(snake_case__ , snake_case__ )["sample"]
            if isinstance(self.scheduler, DDIMScheduler):
lowerCamelCase_ : List[Any] =self.scheduler.step(
model_output=snake_case__ , timestep=snake_case__ , sample=snake_case__ , eta=snake_case__ , generator=snake_case__ , )["prev_sample"]
else:
lowerCamelCase_ : Optional[int] =self.scheduler.step(
model_output=snake_case__ , timestep=snake_case__ , sample=snake_case__ , generator=snake_case__ , )["prev_sample"]
if mask is not None:
if mask_start > 0:
lowerCamelCase_ : List[str] =mask[:, step, :, :mask_start]
if mask_end > 0:
lowerCamelCase_ : Any =mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
lowerCamelCase_ : Tuple =1 / self.vqvae.config.scaling_factor * images
lowerCamelCase_ : str =self.vqvae.decode(snake_case__ )["sample"]
lowerCamelCase_ : Dict =(images / 2 + 0.5).clamp(0 , 1 )
lowerCamelCase_ : Optional[int] =images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
lowerCamelCase_ : Any =(images * 255).round().astype("uint8" )
lowerCamelCase_ : Optional[Any] =list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
            else (Image.fromarray(_, mode="RGB").convert("L") for _ in images) )
lowerCamelCase_ : Optional[int] =[self.mel.image_to_audio(snake_case__ ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(snake_case__ )[:, np.newaxis, :] ) , **ImagePipelineOutput(snake_case__ ) )
@torch.no_grad()
def UpperCAmelCase__ ( self : Optional[Any] , snake_case__ : List[Image.Image] , snake_case__ : int = 50 ):
        assert isinstance(self.scheduler, DDIMScheduler)
self.scheduler.set_timesteps(snake_case__ )
lowerCamelCase_ : List[str] =np.array(
[np.frombuffer(image.tobytes() , dtype="uint8" ).reshape((1, image.height, image.width) ) for image in images] )
lowerCamelCase_ : List[Any] =(sample / 255) * 2 - 1
lowerCamelCase_ : List[str] =torch.Tensor(snake_case__ ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
lowerCamelCase_ : int =t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
lowerCamelCase_ : str =self.scheduler.alphas_cumprod[t]
lowerCamelCase_ : Optional[int] =(
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
lowerCamelCase_ : Optional[Any] =1 - alpha_prod_t
lowerCamelCase_ : Any =self.unet(snake_case__ , snake_case__ )["sample"]
lowerCamelCase_ : Optional[int] =(1 - alpha_prod_t_prev) ** 0.5 * model_output
lowerCamelCase_ : Dict =(sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
lowerCamelCase_ : List[Any] =sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
    def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
        """Spherical linear interpolation between two tensors."""
        theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
        return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
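# Note on the static method above: it is spherical linear interpolation,
# slerp(x0, x1, alpha) = sin((1 - alpha) * theta) * x0 / sin(theta)
#                      + sin(alpha * theta) * x1 / sin(theta),
# with theta the angle between the flattened tensors; it blends two noise
# tensors while approximately preserving their norm.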
| 721
|
"""simple docstring"""
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule):
_UpperCAmelCase :BigBirdConfig
_UpperCAmelCase :jnp.dtype = jnp.floataa
_UpperCAmelCase :bool = True
def UpperCAmelCase__ ( self : Union[str, Any] ):
super().setup()
lowerCamelCase_ : List[Any] =nn.Dense(5 , dtype=self.dtype )
def __call__( self : str , *snake_case__ : Optional[int] , **snake_case__ : List[str] ):
lowerCamelCase_ : List[str] =super().__call__(*snake_case__ , **snake_case__ )
lowerCamelCase_ : Any =self.cls(outputs[2] )
return outputs[:2] + (cls_out,)
class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering):
    module_class = FlaxBigBirdForNaturalQuestionsModule
def _snake_case ( lowerCamelCase__ : List[Any] , lowerCamelCase__ : Any , lowerCamelCase__ : List[str] , lowerCamelCase__ : Any , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : List[Any] ) -> Dict:
def cross_entropy(lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : List[str] , lowerCamelCase__ : str=None ):
lowerCamelCase_ : List[str] =logits.shape[-1]
lowerCamelCase_ : Tuple =(labels[..., None] == jnp.arange(lowerCamelCase__ )[None]).astype("f4" )
lowerCamelCase_ : Any =jax.nn.log_softmax(lowerCamelCase__ , axis=-1 )
lowerCamelCase_ : str =-jnp.sum(labels * logits , axis=-1 )
if reduction is not None:
lowerCamelCase_ : int =reduction(lowerCamelCase__ )
return loss
lowerCamelCase_ : str =partial(lowerCamelCase__ , reduction=jnp.mean )
lowerCamelCase_ : Union[str, Any] =cross_entropy(lowerCamelCase__ , lowerCamelCase__ )
lowerCamelCase_ : int =cross_entropy(lowerCamelCase__ , lowerCamelCase__ )
lowerCamelCase_ : int =cross_entropy(lowerCamelCase__ , lowerCamelCase__ )
return (start_loss + end_loss + pooled_loss) / 3
@dataclass
class lowercase__ :
_UpperCAmelCase :str = "google/bigbird-roberta-base"
_UpperCAmelCase :int = 3000
_UpperCAmelCase :int = 10500
_UpperCAmelCase :int = 128
_UpperCAmelCase :int = 3
_UpperCAmelCase :int = 1
_UpperCAmelCase :int = 5
# tx_args
_UpperCAmelCase :float = 3e-5
_UpperCAmelCase :float = 0.0
_UpperCAmelCase :int = 20000
_UpperCAmelCase :float = 0.00_95
_UpperCAmelCase :str = "bigbird-roberta-natural-questions"
_UpperCAmelCase :str = "training-expt"
_UpperCAmelCase :str = "data/nq-training.jsonl"
_UpperCAmelCase :str = "data/nq-validation.jsonl"
def UpperCAmelCase__ ( self : List[Any] ):
os.makedirs(self.base_dir , exist_ok=snake_case__ )
lowerCamelCase_ : List[Any] =os.path.join(self.base_dir , self.save_dir )
lowerCamelCase_ : str =self.batch_size_per_device * jax.device_count()
@dataclass
class lowercase__ :
_UpperCAmelCase :int
_UpperCAmelCase :int = 4096 # no dynamic padding on TPUs
def __call__( self : int , snake_case__ : str ):
lowerCamelCase_ : Any =self.collate_fn(snake_case__ )
lowerCamelCase_ : Tuple =jax.tree_util.tree_map(snake_case__ , snake_case__ )
return batch
def UpperCAmelCase__ ( self : Dict , snake_case__ : Any ):
lowerCamelCase_ , lowerCamelCase_ : Any =self.fetch_inputs(features["input_ids"] )
lowerCamelCase_ : Optional[int] ={
"input_ids": jnp.array(snake_case__ , dtype=jnp.intaa ),
"attention_mask": jnp.array(snake_case__ , dtype=jnp.intaa ),
"start_labels": jnp.array(features["start_token"] , dtype=jnp.intaa ),
"end_labels": jnp.array(features["end_token"] , dtype=jnp.intaa ),
"pooled_labels": jnp.array(features["category"] , dtype=jnp.intaa ),
}
return batch
def UpperCAmelCase__ ( self : List[str] , snake_case__ : list ):
lowerCamelCase_ : Dict =[self._fetch_inputs(snake_case__ ) for ids in input_ids]
return zip(*snake_case__ )
def UpperCAmelCase__ ( self : List[str] , snake_case__ : list ):
lowerCamelCase_ : Any =[1 for _ in range(len(snake_case__ ) )]
while len(snake_case__ ) < self.max_length:
input_ids.append(self.pad_id )
attention_mask.append(0 )
return input_ids, attention_mask
def _snake_case ( lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : str=None ) -> Tuple:
if seed is not None:
lowerCamelCase_ : List[Any] =dataset.shuffle(seed=lowerCamelCase__ )
for i in range(len(lowerCamelCase__ ) // batch_size ):
lowerCamelCase_ : Any =dataset[i * batch_size : (i + 1) * batch_size]
yield dict(lowerCamelCase__ )
@partial(jax.pmap , axis_name="batch" )
def _snake_case ( lowerCamelCase__ : str , lowerCamelCase__ : Tuple , **lowerCamelCase__ : int ) -> str:
def loss_fn(lowerCamelCase__ : Optional[int] ):
lowerCamelCase_ : Any =model_inputs.pop("start_labels" )
lowerCamelCase_ : Dict =model_inputs.pop("end_labels" )
lowerCamelCase_ : Union[str, Any] =model_inputs.pop("pooled_labels" )
lowerCamelCase_ : Optional[Any] =state.apply_fn(**lowerCamelCase__ , params=lowerCamelCase__ , dropout_rng=lowerCamelCase__ , train=lowerCamelCase__ )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ : Tuple =outputs
return state.loss_fn(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , )
lowerCamelCase_ , lowerCamelCase_ : Any =jax.random.split(lowerCamelCase__ )
lowerCamelCase_ : str =jax.value_and_grad(lowerCamelCase__ )
lowerCamelCase_ , lowerCamelCase_ : Optional[Any] =grad_fn(state.params )
lowerCamelCase_ : int =jax.lax.pmean({"loss": loss} , axis_name="batch" )
lowerCamelCase_ : Optional[Any] =jax.lax.pmean(lowerCamelCase__ , "batch" )
lowerCamelCase_ : str =state.apply_gradients(grads=lowerCamelCase__ )
return state, metrics, new_drp_rng
@partial(jax.pmap , axis_name="batch" )
def _snake_case ( lowerCamelCase__ : int , **lowerCamelCase__ : str ) -> List[str]:
lowerCamelCase_ : List[str] =model_inputs.pop("start_labels" )
lowerCamelCase_ : str =model_inputs.pop("end_labels" )
lowerCamelCase_ : int =model_inputs.pop("pooled_labels" )
lowerCamelCase_ : List[Any] =state.apply_fn(**lowerCamelCase__ , params=state.params , train=lowerCamelCase__ )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ : List[Any] =outputs
lowerCamelCase_ : List[Any] =state.loss_fn(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
lowerCamelCase_ : Union[str, Any] =jax.lax.pmean({"loss": loss} , axis_name="batch" )
return metrics
class lowercase__ ( train_state.TrainState ):
_UpperCAmelCase :Callable = struct.field(pytree_node=snake_case__ )
@dataclass
class lowercase__ :
_UpperCAmelCase :Args
_UpperCAmelCase :Callable
_UpperCAmelCase :Callable
_UpperCAmelCase :Callable
_UpperCAmelCase :Callable
_UpperCAmelCase :wandb
_UpperCAmelCase :Callable = None
def UpperCAmelCase__ ( self : str , snake_case__ : List[str] , snake_case__ : List[str] , snake_case__ : Dict , snake_case__ : List[Any]=None ):
lowerCamelCase_ : Union[str, Any] =model.params
lowerCamelCase_ : Tuple =TrainState.create(
apply_fn=model.__call__ , params=snake_case__ , tx=snake_case__ , loss_fn=snake_case__ , )
if ckpt_dir is not None:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ : Optional[Any] =restore_checkpoint(snake_case__ , snake_case__ )
lowerCamelCase_ : Optional[Any] ={
"lr": args.lr,
"init_lr": args.init_lr,
"warmup_steps": args.warmup_steps,
"num_train_steps": num_train_steps,
"weight_decay": args.weight_decay,
}
lowerCamelCase_ , lowerCamelCase_ : List[str] =build_tx(**snake_case__ )
lowerCamelCase_ : str =train_state.TrainState(
step=snake_case__ , apply_fn=model.__call__ , params=snake_case__ , tx=snake_case__ , opt_state=snake_case__ , )
lowerCamelCase_ : Any =args
lowerCamelCase_ : Optional[Any] =data_collator
lowerCamelCase_ : int =lr
lowerCamelCase_ : List[Any] =params
lowerCamelCase_ : Tuple =jax_utils.replicate(snake_case__ )
return state
def UpperCAmelCase__ ( self : List[Any] , snake_case__ : Tuple , snake_case__ : Optional[int] , snake_case__ : Union[str, Any] ):
lowerCamelCase_ : Union[str, Any] =self.args
lowerCamelCase_ : List[Any] =len(snake_case__ ) // args.batch_size
lowerCamelCase_ : Tuple =jax.random.PRNGKey(0 )
lowerCamelCase_ : int =jax.random.split(snake_case__ , jax.device_count() )
for epoch in range(args.max_epochs ):
lowerCamelCase_ : str =jnp.array(0 , dtype=jnp.floataa )
lowerCamelCase_ : int =get_batched_dataset(snake_case__ , args.batch_size , seed=snake_case__ )
lowerCamelCase_ : Any =0
for batch in tqdm(snake_case__ , total=snake_case__ , desc=F"""Running EPOCH-{epoch}""" ):
lowerCamelCase_ : Dict =self.data_collator(snake_case__ )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ : Dict =self.train_step_fn(snake_case__ , snake_case__ , **snake_case__ )
running_loss += jax_utils.unreplicate(metrics["loss"] )
i += 1
if i % args.logging_steps == 0:
lowerCamelCase_ : int =jax_utils.unreplicate(state.step )
lowerCamelCase_ : Any =running_loss.item() / i
lowerCamelCase_ : Tuple =self.scheduler_fn(state_step - 1 )
lowerCamelCase_ : Tuple =self.evaluate(snake_case__ , snake_case__ )
lowerCamelCase_ : Optional[Any] ={
"step": state_step.item(),
"eval_loss": eval_loss.item(),
"tr_loss": tr_loss,
"lr": lr.item(),
}
tqdm.write(str(snake_case__ ) )
self.logger.log(snake_case__ , commit=snake_case__ )
if i % args.save_steps == 0:
self.save_checkpoint(args.save_dir + F"""-e{epoch}-s{i}""" , state=snake_case__ )
def UpperCAmelCase__ ( self : Dict , snake_case__ : List[Any] , snake_case__ : Union[str, Any] ):
lowerCamelCase_ : Union[str, Any] =get_batched_dataset(snake_case__ , self.args.batch_size )
lowerCamelCase_ : str =len(snake_case__ ) // self.args.batch_size
lowerCamelCase_ : int =jnp.array(0 , dtype=jnp.floataa )
lowerCamelCase_ : Any =0
for batch in tqdm(snake_case__ , total=snake_case__ , desc="Evaluating ... " ):
lowerCamelCase_ : str =self.data_collator(snake_case__ )
lowerCamelCase_ : Tuple =self.val_step_fn(snake_case__ , **snake_case__ )
running_loss += jax_utils.unreplicate(metrics["loss"] )
i += 1
return running_loss / i
def UpperCAmelCase__ ( self : List[str] , snake_case__ : str , snake_case__ : Any ):
lowerCamelCase_ : int =jax_utils.unreplicate(snake_case__ )
print(F"""SAVING CHECKPOINT IN {save_dir}""" , end=" ... " )
self.model_save_fn(snake_case__ , params=state.params )
with open(os.path.join(snake_case__ , "opt_state.msgpack" ) , "wb" ) as f:
f.write(to_bytes(state.opt_state ) )
joblib.dump(self.args , os.path.join(snake_case__ , "args.joblib" ) )
joblib.dump(self.data_collator , os.path.join(snake_case__ , "data_collator.joblib" ) )
with open(os.path.join(snake_case__ , "training_state.json" ) , "w" ) as f:
json.dump({"step": state.step.item()} , snake_case__ )
print("DONE" )
def _snake_case ( lowerCamelCase__ : Optional[int] , lowerCamelCase__ : str ) -> Dict:
print(F"""RESTORING CHECKPOINT FROM {save_dir}""" , end=" ... " )
with open(os.path.join(lowerCamelCase__ , "flax_model.msgpack" ) , "rb" ) as f:
lowerCamelCase_ : List[Any] =from_bytes(state.params , f.read() )
with open(os.path.join(lowerCamelCase__ , "opt_state.msgpack" ) , "rb" ) as f:
lowerCamelCase_ : Tuple =from_bytes(state.opt_state , f.read() )
lowerCamelCase_ : Union[str, Any] =joblib.load(os.path.join(lowerCamelCase__ , "args.joblib" ) )
lowerCamelCase_ : str =joblib.load(os.path.join(lowerCamelCase__ , "data_collator.joblib" ) )
with open(os.path.join(lowerCamelCase__ , "training_state.json" ) , "r" ) as f:
lowerCamelCase_ : Union[str, Any] =json.load(lowerCamelCase__ )
lowerCamelCase_ : str =training_state["step"]
print("DONE" )
return params, opt_state, step, args, data_collator
def _snake_case ( lowerCamelCase__ : Tuple , lowerCamelCase__ : str , lowerCamelCase__ : Any , lowerCamelCase__ : Union[str, Any] ) -> str:
lowerCamelCase_ : Tuple =num_train_steps - warmup_steps
lowerCamelCase_ : Any =optax.linear_schedule(init_value=lowerCamelCase__ , end_value=lowerCamelCase__ , transition_steps=lowerCamelCase__ )
lowerCamelCase_ : int =optax.linear_schedule(init_value=lowerCamelCase__ , end_value=1e-7 , transition_steps=lowerCamelCase__ )
lowerCamelCase_ : Union[str, Any] =optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] )
return lr
def _snake_case ( lowerCamelCase__ : List[str] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : Tuple , lowerCamelCase__ : List[str] ) -> List[str]:
def weight_decay_mask(lowerCamelCase__ : List[Any] ):
lowerCamelCase_ : Any =traverse_util.flatten_dict(lowerCamelCase__ )
lowerCamelCase_ : Tuple ={k: (v[-1] != "bias" and v[-2:] != ("LayerNorm", "scale")) for k, v in params.items()}
return traverse_util.unflatten_dict(lowerCamelCase__ )
lowerCamelCase_ : Optional[Any] =scheduler_fn(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
lowerCamelCase_ : List[str] =optax.adamw(learning_rate=lowerCamelCase__ , weight_decay=lowerCamelCase__ , mask=lowerCamelCase__ )
return tx, lr
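# A small sketch (illustrative numbers, not taken from the script) of the
# two-phase schedule that scheduler_fn builds: linear warmup to `lr`, then
# linear decay toward 1e-7:
#
#     import optax
#     sched = optax.join_schedules(
#         schedules=[
#             optax.linear_schedule(init_value=0.0, end_value=3e-5, transition_steps=100),
#             optax.linear_schedule(init_value=3e-5, end_value=1e-7, transition_steps=900),
#         ],
#         boundaries=[100],
#     )
#     sched(0), sched(100), sched(1000)  # 0.0, 3e-5, ~1e-7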
| 244
| 0
|
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class ActivationsTests(unittest.TestCase):
    def test_swish(self):
        act = get_activation("swish")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_silu(self):
        act = get_activation("silu")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_mish(self):
        act = get_activation("mish")

        self.assertIsInstance(act, nn.Mish)

        self.assertEqual(act(torch.tensor(-200, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_gelu(self):
        act = get_activation("gelu")

        self.assertIsInstance(act, nn.GELU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
| 32
|
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/detr-resnet-50""": """https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json""",
# See all DETR models at https://huggingface.co/models?filter=detr
}
class DetrConfig(PretrainedConfig):
    model_type = "detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            backbone, use_pretrained_backbone, dilation = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    @classmethod
    def from_backbone_config(cls, backbone_config, **kwargs):
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class DetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
| 19
| 0
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
A__ = logging.get_logger(__name__)
A__ = {
"microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}


class LayoutLMv3Config(PretrainedConfig):
    model_type = "layoutlmv3"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_2d_position_embeddings=1024,
        coordinate_size=128,
        shape_size=128,
        has_relative_attention_bias=True,
        rel_pos_bins=32,
        max_rel_pos=128,
        rel_2d_pos_bins=64,
        max_rel_2d_pos=256,
        has_spatial_attention_bias=True,
        text_embed=True,
        visual_embed=True,
        input_size=224,
        num_channels=3,
        patch_size=16,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            intermediate_size=intermediate_size,
            hidden_act=hidden_act,
            hidden_dropout_prob=hidden_dropout_prob,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings,
            type_vocab_size=type_vocab_size,
            initializer_range=initializer_range,
            layer_norm_eps=layer_norm_eps,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        # LayoutLMv3-specific settings: 2D (layout) position embeddings plus visual patch embeddings
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout
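

# A small sketch (added for illustration, not in the original file): the config
# combines standard BERT-style text settings with layout (2D position) and
# vision (patch) settings, so downstream code can size its embedding tables
# directly from these fields.
def _example_layoutlmv3_config():
    config = LayoutLMv3Config()
    # Text side: standard transformer sizes
    print(config.hidden_size, config.num_hidden_layers)  # 768 12
    # Layout side: bounding-box coordinates are bucketed into this many positions
    print(config.max_2d_position_embeddings)  # 1024
    # Vision side: a 224x224 image with 16x16 patches yields (224 // 16) ** 2 patches
    print((config.input_size // config.patch_size) ** 2)  # 196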


class LayoutLMv3OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.12")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # The order of inputs differs between question answering / sequence
        # classification and the remaining tasks
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ]
            )
        else:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels"}),
                ]
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        # OCR must be disabled, since the dummy text and boxes are supplied directly
        setattr(processor.image_processor, "apply_ocr", False)

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[" ".join([processor.tokenizer.unk_token]) * seq_length]] * batch_size

        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)

        inputs = dict(
            processor(
                dummy_image,
                text=dummy_text,
                boxes=dummy_bboxes,
                return_tensors=framework,
            )
        )

        return inputs
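

# Usage sketch (an addition, not original code): generating ONNX-tracing inputs
# requires a full processor so that text, boxes, and pixel values are batched
# consistently. The checkpoint name is illustrative; any LayoutLMv3 processor
# should work, and `framework="pt"` asks for PyTorch tensors.
def _example_dummy_inputs():
    from transformers import LayoutLMv3Processor

    processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
    onnx_config = LayoutLMv3OnnxConfig(LayoutLMv3Config(), task="question-answering")
    dummy = onnx_config.generate_dummy_inputs(processor, batch_size=2, seq_length=16, framework="pt")
    print({name: tuple(tensor.shape) for name, tensor in dummy.items()})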
from typing import List, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/time-series-transformer-tourism-monthly": (
        "https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
    ),
    # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}


class TimeSeriesTransformerConfig(PretrainedConfig):
    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }
    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            # Heuristic: roughly half the cardinality, capped at 50 dimensions per feature
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
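

# A worked example (added for clarity, not in the original file): with the
# defaults above, input_size=1, lags_sequence has 7 entries, and
# _number_of_features contributes only the two loc/scale features (all other
# feature counts default to 0), so feature_size = 1 * 7 + 2 = 9.
def _example_feature_size():
    config = TimeSeriesTransformerConfig(prediction_length=24)
    print(config.feature_size)  # 9 with default settings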