"""simple docstring"""
def lowercase__( __SCREAMING_SNAKE_CASE : list ):
if not grid or not grid[0]:
raise TypeError('The grid does not contain the appropriate information' )
for cell_n in range(1 , len(grid[0] ) ):
grid[0][cell_n] += grid[0][cell_n - 1]
lowercase_ : int = grid[0]
for row_n in range(1 , len(__SCREAMING_SNAKE_CASE ) ):
lowercase_ : Any = grid[row_n]
lowercase_ : Dict = fill_row(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase_ : List[Any] = grid[row_n]
return grid[-1][-1]
def lowercase__( __SCREAMING_SNAKE_CASE : list , __SCREAMING_SNAKE_CASE : list ):
current_row[0] += row_above[0]
for cell_n in range(1 , len(__SCREAMING_SNAKE_CASE ) ):
current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] )
return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
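
# Hedged usage sketch (the 3x3 grid is a hypothetical example; the result
# follows directly from the logic above): the cheapest right/down path is
# 1 -> 3 -> 1 -> 1 -> 1, so the call returns 7. Note that min_path_sum
# mutates its argument in place.
#
#   min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]])  # 7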
"""simple docstring"""
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class BertGenerationEncoderTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=50,
        initializer_range=0.02,
        use_labels=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.use_labels = use_labels
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return BertGenerationConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(self, config, input_ids, input_mask, token_labels, **kwargs):
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self, config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, **kwargs
    ):
        config.add_cross_attention = True
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_decoder_model_past_large_inputs(
        self, config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, **kwargs
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = BertGenerationDecoder(config=config).to(torch_device).eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next tokens and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and attention mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels, *args):
        model = BertGenerationDecoder(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict

@require_torch
class BertGenerationEncoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    all_generative_model_classes = (BertGenerationDecoder,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
        if is_torch_available()
        else {}
    )

    def setUp(self):
        self.model_tester = BertGenerationEncoderTester(self)
        self.config_tester = ConfigTester(self, config_class=BertGenerationConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_bert(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = "bert"
        self.model_tester.create_and_check_model(config, input_ids, input_mask, token_labels)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_model_as_decoder_with_default_input_mask(self):
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        self.assertIsNotNone(model)


@require_torch
class BertGenerationEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 1024])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))


@require_torch
class BertGenerationDecoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 50358])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
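
# Hedged usage sketch (not part of the test file above): the encoder/decoder
# pair is typically combined via EncoderDecoderModel. Class names follow the
# public transformers API; the checkpoint and token ids are illustrative.
#
#   from transformers import BertGenerationEncoder, BertGenerationDecoder, EncoderDecoderModel
#   encoder = BertGenerationEncoder.from_pretrained("bert-large-uncased", bos_token_id=101, eos_token_id=102)
#   decoder = BertGenerationDecoder.from_pretrained(
#       "bert-large-uncased", add_cross_attention=True, is_decoder=True, bos_token_id=101, eos_token_id=102
#   )
#   bert2bert = EncoderDecoderModel(encoder=encoder, decoder=decoder)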
"""simple docstring"""
def lowercase__( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : bool = False ):
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowercase_ : int = F'''Expected string as input, found {type(__SCREAMING_SNAKE_CASE )}'''
raise ValueError(__SCREAMING_SNAKE_CASE )
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowercase_ : Tuple = F'''Expected boolean as use_pascal parameter, found {type(__SCREAMING_SNAKE_CASE )}'''
raise ValueError(__SCREAMING_SNAKE_CASE )
lowercase_ : str = input_str.split('_' )
lowercase_ : Optional[Any] = 0 if use_pascal else 1
lowercase_ : Any = words[start_index:]
lowercase_ : Union[str, Any] = [word[0].upper() + word[1:] for word in words_to_capitalize]
lowercase_ : List[str] = '' if use_pascal else words[0]
return "".join([initial_word, *capitalized_words] )
if __name__ == "__main__":
from doctest import testmod
testmod()
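
# Hedged usage sketch (outputs follow directly from the logic above):
#
#   snake_to_camel_case("some_random_string")                    # 'someRandomString'
#   snake_to_camel_case("some_random_string", use_pascal=True)   # 'SomeRandomString'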
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    def forward(self, input_ids, token_type_ids, attention_mask):
        return None


class FuncNonContiguousArgs:
    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None


class OnnxExportTestCase(unittest.TestCase):
    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ("bert-base-cased", {}),
        ("gpt2", {"use_cache": False}),  # We don't support exporting GPT2 past keys anymore
    ]
    @require_tf
    @slow
    def test_export_tensorflow(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "tf", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "pt", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_custom_bert_model(self):
        from transformers import BertModel

        vocab = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
        with NamedTemporaryFile(mode="w+t") as vocab_file:
            vocab_file.write("\n".join(vocab))
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name)

            with TemporaryDirectory() as bert_save_dir:
                model = BertModel(BertConfig(vocab_size=len(vocab)))
                model.save_pretrained(bert_save_dir)
                self._test_export(bert_save_dir, "pt", 12, tokenizer)

    @require_tf
    @slow
    def test_quantize_tf(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "tf", 12, **model_kwargs)
            quantized_path = quantize(Path(path))

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    @require_torch
    @slow
    def test_quantize_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "pt", 12, **model_kwargs)
            quantized_path = quantize(path)

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")
    def _test_export(self, model, framework, opset, tokenizer=None, **model_kwargs):
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir).joinpath("model.onnx")

            # Remove folder if exists
            if path.parent.exists():
                path.parent.rmdir()

            # Export
            convert(framework, model, path, opset, tokenizer, **model_kwargs)

            return path
        except Exception as e:
            self.fail(e)

    @require_torch
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_pytorch(self):
        from transformers import BertModel

        model = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "pt")

    @require_tf
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_tf(self):
        from transformers import TFBertModel

        model = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "tf")

    def _test_infer_dynamic_axis(self, model, tokenizer, framework):
        nlp = FeatureExtractionPipeline(model, tokenizer)

        variable_names = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp, framework)

        # Assert all variables are present
        self.assertEqual(len(shapes), len(variable_names))
        self.assertTrue(all(var_name in shapes for var_name in variable_names))
        self.assertSequenceEqual(variable_names[:3], input_vars)
        self.assertSequenceEqual(variable_names[3:], output_vars)

        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name], {0: "batch", 1: "sequence"})

        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes["output_0"], {0: "batch", 1: "sequence"})
        self.assertDictEqual(shapes["output_1"], {0: "batch"})

    def test_ensure_valid_input(self):
        input_names = ["input_ids", "attention_mask", "token_type_ids"]
        tokens = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
        ordered_input_names, inputs_args = ensure_valid_input(FuncContiguousArgs(), tokens, input_names)

        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args), 3)

        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names), set(input_names))

        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args, (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]))

        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names, inputs_args = ensure_valid_input(FuncNonContiguousArgs(), tokens, input_names)

        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(inputs_args), 1)
        self.assertEqual(len(ordered_input_names), 1)

        # Should have only "input_ids"
        self.assertEqual(inputs_args[0], tokens["input_ids"])
        self.assertEqual(ordered_input_names[0], "input_ids")

    def test_generate_identified_name(self):
        generated = generate_identified_filename(Path("/home/something/my_fake_model.onnx"), "-test")
        self.assertEqual("/home/something/my_fake_model-test.onnx", generated.as_posix())
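
# Hedged usage sketch of the conversion API exercised above (argument order
# follows transformers.convert_graph_to_onnx.convert as called in
# _test_export; the model name and output path are illustrative):
#
#   from pathlib import Path
#   from transformers.convert_graph_to_onnx import convert
#   convert(framework="pt", model="bert-base-cased", output=Path("/tmp/bert-base-cased.onnx"), opset=12)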
"""simple docstring"""
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
    "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}


class XLNetConfig(PretrainedConfig):
    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=32000,
        d_model=1024,
        n_layer=24,
        n_head=16,
        d_inner=4096,
        ff_activation="gelu",
        untie_r=True,
        attn_type="bi",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        dropout=0.1,
        mem_len=512,
        reuse_len=None,
        use_mems_eval=True,
        use_mems_train=False,
        bi_data=False,
        clamp_len=-1,
        same_length=False,
        summary_type="last",
        summary_use_proj=True,
        summary_activation="tanh",
        summary_last_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        pad_token_id=5,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})"
                )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type

        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length

        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top

        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id

        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.",
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]

        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
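
# Hedged usage sketch (class names follow the public transformers API; the
# defaults are the ones defined above):
#
#   from transformers import XLNetConfig, XLNetModel
#   configuration = XLNetConfig()        # d_model=1024, n_layer=24, n_head=16, ...
#   model = XLNetModel(configuration)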
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class DisjunctiveConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # `token_ids` should be a list of lists of integers.
        cslist = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cslist)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # One constraint is a complete subset of another, which is ambiguous and rejected.
        cslist = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cslist)  # fails here

    def test_example_progression(self):
        constraints = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(constraints)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        constraints = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(constraints)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
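
# Hedged sketch: a DisjunctiveConstraint behaves like a trie over its nested
# token lists, so feeding tokens one at a time advances whichever branches
# still match (behavior derivable from the tests above; values illustrative):
#
#   dc = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]])
#   dc.update(1); dc.update(2); dc.update(4)
#   assert dc.completed  # the [1, 2, 4] branch is fulfilled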
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
        GPT2Tokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class InstructBlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")
        qformer_tokenizer = BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert")

        processor = InstructBlipProcessor(image_processor, tokenizer, qformer_tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def get_qformer_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).qformer_tokenizer

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images from random numpy arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = InstructBlipProcessor(
            tokenizer=self.get_tokenizer(),
            image_processor=self.get_image_processor(),
            qformer_tokenizer=self.get_qformer_tokenizer(),
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = InstructBlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)
        self.assertIsInstance(processor.qformer_tokenizer, PreTrainedTokenizerFast)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tokens = tokenizer(input_str, return_token_type_ids=False)
        encoded_tokens_qformer = qformer_tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tokens.keys():
            self.assertListEqual(encoded_tokens[key], encoded_processor[key])

        for key in encoded_tokens_qformer.keys():
            self.assertListEqual(encoded_tokens_qformer[key], encoded_processor["qformer_" + key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(
            list(inputs.keys()),
            ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"],
        )

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(
            list(inputs.keys()),
            ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"],
        )
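
# Hedged usage sketch of the processor under test (the checkpoint name
# follows the public Hub naming; the image and prompt are illustrative):
#
#   from transformers import InstructBlipProcessor
#   processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
#   inputs = processor(images=image, text="What is unusual about this image?", return_tensors="pt")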
"""simple docstring"""
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_checkpoint_to_pytorch(tf_checkpoint_path: str, config_path: str, pytorch_dump_path: str):
    def get_masked_lm_array(name: str):
        full_name = f"masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    def get_encoder_array(name: str):
        full_name = f"encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    def get_encoder_layer_array(layer_index: int, name: str):
        full_name = f"encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    def get_encoder_attention_layer_array(layer_index: int, name: str, original_shape):
        full_name = f"encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        array = array.reshape(original_shape)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    print(f"Loading model based on config from {config_path}...")
    config = BertConfig.from_json_file(config_path)
    model = BertForMaskedLM(config)

    # Layers
    for layer_index in range(0, config.num_hidden_layers):
        layer: BertLayer = model.bert.encoder.layer[layer_index]

        # Self-attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.query.weight.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/kernel", self_attn.query.weight.data.shape
        )
        self_attn.query.bias.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/bias", self_attn.query.bias.data.shape
        )
        self_attn.key.weight.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/kernel", self_attn.key.weight.data.shape
        )
        self_attn.key.bias.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/bias", self_attn.key.bias.data.shape
        )
        self_attn.value.weight.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/kernel", self_attn.value.weight.data.shape
        )
        self_attn.value.bias.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/bias", self_attn.value.bias.data.shape
        )

        # Self-attention Output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.weight.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/kernel", self_output.dense.weight.data.shape
        )
        self_output.dense.bias.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/bias", self_output.dense.bias.data.shape
        )
        self_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/gamma")
        self_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/beta")

        # Intermediate
        intermediate: BertIntermediate = layer.intermediate
        intermediate.dense.weight.data = get_encoder_layer_array(layer_index, "_intermediate_dense/kernel")
        intermediate.dense.bias.data = get_encoder_layer_array(layer_index, "_intermediate_dense/bias")

        # Output
        bert_output: BertOutput = layer.output
        bert_output.dense.weight.data = get_encoder_layer_array(layer_index, "_output_dense/kernel")
        bert_output.dense.bias.data = get_encoder_layer_array(layer_index, "_output_dense/bias")
        bert_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_output_layer_norm/gamma")
        bert_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_output_layer_norm/beta")

    # Embeddings
    model.bert.embeddings.position_embeddings.weight.data = get_encoder_array("_position_embedding_layer/embeddings")
    model.bert.embeddings.token_type_embeddings.weight.data = get_encoder_array("_type_embedding_layer/embeddings")
    model.bert.embeddings.LayerNorm.weight.data = get_encoder_array("_embedding_norm_layer/gamma")
    model.bert.embeddings.LayerNorm.bias.data = get_encoder_array("_embedding_norm_layer/beta")

    # LM Head
    lm_head = model.cls.predictions.transform
    lm_head.dense.weight.data = get_masked_lm_array("dense/kernel")
    lm_head.dense.bias.data = get_masked_lm_array("dense/bias")
    lm_head.LayerNorm.weight.data = get_masked_lm_array("layer_norm/gamma")
    lm_head.LayerNorm.bias.data = get_masked_lm_array("layer_norm/beta")
    model.bert.embeddings.word_embeddings.weight.data = get_masked_lm_array("embedding_table")

    # Pooling
    model.bert.pooler = BertPooler(config=config)
    model.bert.pooler.dense.weight.data = get_encoder_array("_pooler_layer/kernel")
    model.bert.pooler.dense.bias.data = get_encoder_array("_pooler_layer/bias")

    # Export final model
    model.save_pretrained(pytorch_dump_path)

    # Integration test - should load without any errors ;)
    new_model = BertForMaskedLM.from_pretrained(pytorch_dump_path)
    print(new_model.eval())
    print("Model conversion was done successfully!")

if __name__ == "__main__":
__SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
parser.add_argument(
"--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow Token Dropping checkpoint path."
)
parser.add_argument(
"--bert_config_file",
type=str,
required=True,
help="The config json file corresponding to the BERT model. This specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path",
type=str,
required=True,
help="Path to the output PyTorch model.",
)
__SCREAMING_SNAKE_CASE =parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
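
# Hedged CLI sketch (the script filename and all paths are illustrative):
#
#   python convert_bert_token_dropping_original_tf2_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/tf2_checkpoint \
#       --bert_config_file /path/to/bert_config.json \
#       --pytorch_dump_path /path/to/pytorch_model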
"""simple docstring"""
import argparse
from collections import defaultdict
import yaml
__SCREAMING_SNAKE_CASE ="docs/source/en/_toctree.yml"
def clean_doc_toc(doc_list):
    """Remove duplicates from a doc list and sort its entries alphabetically by title."""
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1

        if doc["title"].lower() == "overview":
            overview_doc.append({"local": doc["local"], "title": doc["title"]})
        else:
            new_doc_list.append(doc)

    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in doc_list if "local" not in counts or counts[doc["local"]] == 1])
    new_doc = sorted(new_doc, key=lambda s: s["title"].lower())

    # "overview" gets special treatment and is always first
    if len(overview_doc) > 1:
        raise ValueError("{doc_list} has two 'overview' docs, which is not allowed.")

    overview_doc.extend(new_doc)

    # Sort
    return overview_doc


def check_scheduler_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1

    scheduler_doc = api_doc[scheduler_idx]["sections"]
    new_scheduler_doc = clean_doc_toc(scheduler_doc)

    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]["sections"] = new_scheduler_doc

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


def check_pipeline_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1

    diff = False
    pipeline_docs = api_doc[pipeline_idx]["sections"]
    new_pipeline_docs = []

    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["section"]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc["section"] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)

    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)

    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]["sections"] = new_pipeline_docs

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )

if __name__ == "__main__":
__SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
__SCREAMING_SNAKE_CASE =parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
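
# Hedged CLI sketch (the script path is illustrative; the flag comes from the
# argparse definition above):
#
#   python utils/check_doc_toc.py                      # raise on an unsorted ToC
#   python utils/check_doc_toc.py --fix_and_overwrite  # rewrite the ToC in place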
"""simple docstring"""
from collections import namedtuple
import requests
from lxml import html # type: ignore
__SCREAMING_SNAKE_CASE =namedtuple("covid_data", "cases deaths recovered")
def lowercase__( __SCREAMING_SNAKE_CASE : str = "https://www.worldometers.info/coronavirus/" ):
lowercase_ : Union[str, Any] = '//div[@class = "maincounter-number"]/span/text()'
return covid_data(*html.fromstring(requests.get(__SCREAMING_SNAKE_CASE ).content ).xpath(__SCREAMING_SNAKE_CASE ) )
__SCREAMING_SNAKE_CASE ="Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}"
print(fmt.format(*covid_stats()))
"""simple docstring"""
def lowercase__( __SCREAMING_SNAKE_CASE : int = 4_00_00_00 ):
lowercase_ : str = []
lowercase_ , lowercase_ : List[Any] = 0, 1
while b <= n:
if b % 2 == 0:
even_fibs.append(__SCREAMING_SNAKE_CASE )
lowercase_ , lowercase_ : List[str] = b, a + b
return sum(__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
print(F"{solution() = }")
"""simple docstring"""
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
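
# Hedged usage sketch (task keys and methods follow the public
# transformers.data API; the data directory is illustrative):
#
#   processor = glue_processors["mrpc"]()
#   examples = processor.get_train_examples("/path/to/glue/MRPC")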
"""simple docstring"""
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class DifferentiableProjectiveCamera:
    """Implements a batched, differentiable, standard pinhole camera."""

    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]

    def __post_init__(self):
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2

    def resolution(self):
        return torch.from_numpy(np.array([self.width, self.height], dtype=np.float32))

    def fov(self):
        return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32))

    def get_image_coords(self) -> torch.Tensor:
        pixel_indices = torch.arange(self.height * self.width)
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices, self.width, rounding_mode="trunc"),
            ],
            axis=1,
        )
        return coords

    @property
    def camera_rays(self):
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape))

        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0), [batch_size * inner_batch_size, *coords.shape])
        rays = self.get_camera_rays(coords)
        rays = rays.view(batch_size, inner_batch_size * self.height * self.width, 2, 3)
        return rays

    def get_camera_rays(self, coords: torch.Tensor) -> torch.Tensor:
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]

        flat = coords.view(batch_size, -1, 2)

        res = self.resolution()
        fov = self.fov()

        # Map pixel coordinates to [-1, 1], then to tangent-plane fractions.
        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2)

        fracs = fracs.view(batch_size, -1, 2)
        directions = (
            self.z.view(batch_size, 1, 3)
            + self.x.view(batch_size, 1, 3) * fracs[:, :, :1]
            + self.y.view(batch_size, 1, 3) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1, keepdim=True)
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size, 1, 3), [batch_size, directions.shape[1], 3]),
                directions,
            ],
            dim=2,
        )
        return rays.view(batch_size, *shape, 2, 3)

    def resize_image(self, width: int, height: int) -> "DifferentiableProjectiveCamera":
        """Create a new camera for a resized view, assuming the aspect ratio does not change."""
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin,
            x=self.x,
            y=self.y,
            z=self.z,
            width=width,
            height=height,
            x_fov=self.x_fov,
            y_fov=self.y_fov,
        )

def create_pan_cameras(size: int) -> DifferentiableProjectiveCamera:
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0, 2 * np.pi, num=20):
        z = np.array([np.sin(theta), np.cos(theta), -0.5])
        z /= np.sqrt(np.sum(z**2))
        origin = -z * 4
        x = np.array([np.cos(theta), -np.sin(theta), 0.0])
        y = np.cross(z, x)
        origins.append(origin)
        xs.append(x)
        ys.append(y)
        zs.append(z)
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins, axis=0)).float(),
        x=torch.from_numpy(np.stack(xs, axis=0)).float(),
        y=torch.from_numpy(np.stack(ys, axis=0)).float(),
        z=torch.from_numpy(np.stack(zs, axis=0)).float(),
        width=size,
        height=size,
        x_fov=0.7,
        y_fov=0.7,
        shape=(1, len(xs)),
    )
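
# Hedged usage sketch: build the 20-view orbit defined above and inspect its
# tensors (shapes follow from shape=(1, 20) and a 64x64 image plane):
#
#   cameras = create_pan_cameras(64)
#   cameras.origin.shape       # torch.Size([20, 3])
#   cameras.camera_rays.shape  # torch.Size([1, 81920, 2, 3]), i.e. 20 views x 64*64 pixels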
"""simple docstring"""
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class EsmModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=33,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = EsmForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = EsmForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict

@require_torch
class EsmModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False

    all_model_classes = (
        (
            EsmForMaskedLM,
            EsmModel,
            EsmForSequenceClassification,
            EsmForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = ()
    pipeline_model_mapping = (
        {
            "feature-extraction": EsmModel,
            "fill-mask": EsmForMaskedLM,
            "text-classification": EsmForSequenceClassification,
            "token-classification": EsmForTokenClassification,
            "zero-shot": EsmForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_sequence_classification_problem_types = True

    def setUp(self):
        self.model_tester = EsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = EsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def _UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
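        # Padding tokens must map to padding_idx, while real tokens receive
        # positions offset by padding_idx + 1 (RoBERTa-style), as encoded in
        # the expected tensor below.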
lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()[0]
lowercase_ : str = EsmEmbeddings(config=__UpperCamelCase )
lowercase_ : Tuple = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
lowercase_ : List[Any] = torch.as_tensor(
[
[
0 + model.padding_idx + 1,
1 + model.padding_idx + 1,
2 + model.padding_idx + 1,
model.padding_idx,
]
] )
lowercase_ : Tuple = create_position_ids_from_input_ids(__UpperCamelCase ,model.padding_idx )
self.assertEqual(position_ids.shape ,expected_positions.shape )
self.assertTrue(torch.all(torch.eq(__UpperCamelCase ,__UpperCamelCase ) ) )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs()[0]
lowercase_ : List[Any] = EsmEmbeddings(config=__UpperCamelCase )
lowercase_ : List[Any] = torch.empty(2 ,4 ,30 )
lowercase_ : List[str] = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
lowercase_ : List[str] = torch.as_tensor([expected_single_positions, expected_single_positions] )
lowercase_ : List[str] = embeddings.create_position_ids_from_inputs_embeds(__UpperCamelCase )
self.assertEqual(position_ids.shape ,expected_positions.shape )
self.assertTrue(torch.all(torch.eq(__UpperCamelCase ,__UpperCamelCase ) ) )
@unittest.skip('Esm does not support embedding resizing' )
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
pass
@unittest.skip('Esm does not support embedding resizing' )
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
pass
@require_torch
class UpperCamelCase ( lowercase_ ):
@slow
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
with torch.no_grad():
lowercase_ : Any = EsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' )
model.eval()
lowercase_ : List[Any] = torch.tensor([[0, 1, 2, 3, 4, 5]] )
lowercase_ : List[str] = model(__UpperCamelCase )[0]
lowercase_ : Optional[int] = 33
lowercase_ : Union[str, Any] = torch.Size((1, 6, vocab_size) )
self.assertEqual(output.shape ,__UpperCamelCase )
lowercase_ : List[str] = torch.tensor(
[[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) )
@slow
def _UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
with torch.no_grad():
lowercase_ : int = EsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' )
model.eval()
lowercase_ : Tuple = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
lowercase_ : Dict = model(__UpperCamelCase )[0]
# compare the actual values for a slice.
lowercase_ : Any = torch.tensor(
[[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) )
"""simple docstring"""
def lowercase__( __SCREAMING_SNAKE_CASE : int | float | str ):
try:
lowercase_ : List[str] = float(__SCREAMING_SNAKE_CASE )
except ValueError:
raise ValueError('Please enter a valid number' )
lowercase_ : List[str] = decimal - int(__SCREAMING_SNAKE_CASE )
if fractional_part == 0:
return int(__SCREAMING_SNAKE_CASE ), 1
else:
lowercase_ : int = len(str(__SCREAMING_SNAKE_CASE ).split('.' )[1] )
lowercase_ : Any = int(decimal * (10**number_of_frac_digits) )
lowercase_ : int = 10**number_of_frac_digits
lowercase_ , lowercase_ : str = denominator, numerator
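        # Euclid's algorithm: when the loop ends, `divisor` holds
        # gcd(numerator, denominator), which reduces the fraction below.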
while True:
lowercase_ : List[str] = dividend % divisor
if remainder == 0:
break
lowercase_ , lowercase_ : Tuple = divisor, remainder
        lowercase_ , lowercase_ : Dict = numerator // divisor, denominator // divisor
return int(__SCREAMING_SNAKE_CASE ), int(__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
print(F"{decimal_to_fraction(2) = }")
print(F"{decimal_to_fraction(89.0) = }")
print(F"{decimal_to_fraction('67') = }")
print(F"{decimal_to_fraction('45.0') = }")
print(F"{decimal_to_fraction(1.5) = }")
print(F"{decimal_to_fraction('6.25') = }")
print(F"{decimal_to_fraction('78td') = }")
"""simple docstring"""
import pickle
import numpy as np
from matplotlib import pyplot as plt
class UpperCamelCase :
def __init__( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=0.2 ,__UpperCamelCase=0.2 ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : Optional[int] = bp_numa
lowercase_ : Dict = bp_numa
lowercase_ : Tuple = bp_numa
lowercase_ : List[Any] = conva_get[:2]
lowercase_ : int = conva_get[2]
lowercase_ : Dict = size_pa
lowercase_ : int = rate_w
lowercase_ : Union[str, Any] = rate_t
lowercase_ : Dict = [
np.mat(-1 * np.random.rand(self.conva[0] ,self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
lowercase_ : Union[str, Any] = np.mat(-1 * np.random.rand(self.num_bpa ,self.num_bpa ) + 0.5 )
lowercase_ : Union[str, Any] = np.mat(-1 * np.random.rand(self.num_bpa ,self.num_bpa ) + 0.5 )
lowercase_ : str = -2 * np.random.rand(self.conva[1] ) + 1
lowercase_ : Tuple = -2 * np.random.rand(self.num_bpa ) + 1
lowercase_ : Union[str, Any] = -2 * np.random.rand(self.num_bpa ) + 1
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
lowercase_ : int = {
'num_bp1': self.num_bpa,
'num_bp2': self.num_bpa,
'num_bp3': self.num_bpa,
'conv1': self.conva,
'step_conv1': self.step_conva,
'size_pooling1': self.size_poolinga,
'rate_weight': self.rate_weight,
'rate_thre': self.rate_thre,
'w_conv1': self.w_conva,
'wkj': self.wkj,
'vji': self.vji,
'thre_conv1': self.thre_conva,
'thre_bp2': self.thre_bpa,
'thre_bp3': self.thre_bpa,
}
with open(__UpperCamelCase ,'wb' ) as f:
pickle.dump(__UpperCamelCase ,__UpperCamelCase )
print(f'''Model saved: {save_path}''' )
@classmethod
def _UpperCAmelCase ( cls ,__UpperCamelCase ) -> List[Any]:
'''simple docstring'''
with open(__UpperCamelCase ,'rb' ) as f:
lowercase_ : Any = pickle.load(__UpperCamelCase ) # noqa: S301
lowercase_ : str = model_dic.get('conv1' )
conv_get.append(model_dic.get('step_conv1' ) )
lowercase_ : Union[str, Any] = model_dic.get('size_pooling1' )
lowercase_ : Optional[Any] = model_dic.get('num_bp1' )
lowercase_ : str = model_dic.get('num_bp2' )
lowercase_ : Optional[Any] = model_dic.get('num_bp3' )
lowercase_ : Union[str, Any] = model_dic.get('rate_weight' )
lowercase_ : Optional[int] = model_dic.get('rate_thre' )
# create model instance
lowercase_ : Any = CNN(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
# modify model parameter
lowercase_ : Optional[Any] = model_dic.get('w_conv1' )
lowercase_ : Tuple = model_dic.get('wkj' )
lowercase_ : Union[str, Any] = model_dic.get('vji' )
lowercase_ : Optional[Any] = model_dic.get('thre_conv1' )
lowercase_ : Dict = model_dic.get('thre_bp2' )
lowercase_ : Optional[int] = model_dic.get('thre_bp3' )
return conv_ins
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Any:
'''simple docstring'''
return 1 / (1 + np.exp(-1 * x ))
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
return round(__UpperCamelCase ,3 )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Tuple:
'''simple docstring'''
lowercase_ : Dict = convs[0]
lowercase_ : Any = convs[1]
lowercase_ : Optional[Any] = np.shape(__UpperCamelCase )[0]
# get the data slice of original image data, data_focus
lowercase_ : Tuple = []
for i_focus in range(0 ,size_data - size_conv + 1 ,__UpperCamelCase ):
for j_focus in range(0 ,size_data - size_conv + 1 ,__UpperCamelCase ):
lowercase_ : List[Any] = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(__UpperCamelCase )
# calculate the feature map of every single kernel, and saved as list of matrix
lowercase_ : Dict = []
lowercase_ : Dict = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(__UpperCamelCase ):
lowercase_ : Tuple = []
for i_focus in range(len(__UpperCamelCase ) ):
lowercase_ : Optional[int] = (
np.sum(np.multiply(data_focus[i_focus] ,w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(__UpperCamelCase ) )
lowercase_ : Optional[int] = np.asmatrix(__UpperCamelCase ).reshape(
__UpperCamelCase ,__UpperCamelCase )
data_featuremap.append(__UpperCamelCase )
        # expand each data slice to one dimension
lowercase_ : Optional[int] = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(__UpperCamelCase ) )
lowercase_ : str = np.asarray(__UpperCamelCase )
return focus_list, data_featuremap
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase="average_pool" ) -> Tuple:
'''simple docstring'''
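        # Non-overlapping pooling: every size_pooling x size_pooling window of
        # a feature map collapses to one value (average or max), shrinking each
        # map by a factor of size_pooling per side.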
lowercase_ : Union[str, Any] = len(featuremaps[0] )
lowercase_ : str = int(size_map / size_pooling )
lowercase_ : Optional[int] = []
for i_map in range(len(__UpperCamelCase ) ):
lowercase_ : int = featuremaps[i_map]
lowercase_ : List[str] = []
for i_focus in range(0 ,__UpperCamelCase ,__UpperCamelCase ):
for j_focus in range(0 ,__UpperCamelCase ,__UpperCamelCase ):
lowercase_ : List[str] = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(__UpperCamelCase ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(__UpperCamelCase ) )
lowercase_ : Dict = np.asmatrix(__UpperCamelCase ).reshape(__UpperCamelCase ,__UpperCamelCase )
featuremap_pooled.append(__UpperCamelCase )
return featuremap_pooled
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Any:
'''simple docstring'''
lowercase_ : Tuple = []
for i in range(len(__UpperCamelCase ) ):
lowercase_ : Optional[Any] = np.shape(data[i] )
lowercase_ : List[str] = data[i].reshape(1 ,shapes[0] * shapes[1] )
lowercase_ : List[str] = data_listed.getA().tolist()[0]
data_expanded.extend(__UpperCamelCase )
lowercase_ : int = np.asarray(__UpperCamelCase )
return data_expanded
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> int:
'''simple docstring'''
lowercase_ : Any = np.asarray(__UpperCamelCase )
lowercase_ : Any = np.shape(__UpperCamelCase )
lowercase_ : Optional[Any] = data_mat.reshape(1 ,shapes[0] * shapes[1] )
return data_expanded
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> str:
'''simple docstring'''
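        # Broadcast each pooled gradient value back over its pooling window,
        # then multiply by the sigmoid derivative out * (1 - out) to recover
        # the gradient at feature-map resolution.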
lowercase_ : Any = []
lowercase_ : List[Any] = 0
for i_map in range(__UpperCamelCase ):
lowercase_ : List[str] = np.ones((size_map, size_map) )
for i in range(0 ,__UpperCamelCase ,__UpperCamelCase ):
for j in range(0 ,__UpperCamelCase ,__UpperCamelCase ):
lowercase_ : List[Any] = pd_pool[
i_pool
]
lowercase_ : Any = i_pool + 1
lowercase_ : Optional[int] = np.multiply(
__UpperCamelCase ,np.multiply(out_map[i_map] ,(1 - out_map[i_map]) ) )
pd_all.append(__UpperCamelCase )
return pd_all
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=bool ) -> Optional[int]:
'''simple docstring'''
print('----------------------Start Training-------------------------' )
print((' - - Shape: Train_Data ', np.shape(__UpperCamelCase )) )
print((' - - Shape: Teach_Data ', np.shape(__UpperCamelCase )) )
lowercase_ : int = 0
lowercase_ : Tuple = []
lowercase_ : Tuple = 1_0000
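        # Train until the epoch budget is spent or the mean squared error over
        # all patterns drops below the requested accuracy.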
while rp < n_repeat and mse >= error_accuracy:
lowercase_ : List[str] = 0
print(f'''-------------Learning Time {rp}--------------''' )
for p in range(len(__UpperCamelCase ) ):
# print('------------Learning Image: %d--------------'%p)
lowercase_ : int = np.asmatrix(datas_train[p] )
lowercase_ : Any = np.asarray(datas_teach[p] )
lowercase_ , lowercase_ : Tuple = self.convolute(
__UpperCamelCase ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,)
lowercase_ : Any = self.pooling(__UpperCamelCase ,self.size_poolinga )
lowercase_ : Optional[int] = np.shape(__UpperCamelCase )
lowercase_ : Optional[int] = self._expand(__UpperCamelCase )
lowercase_ : int = data_bp_input
lowercase_ : Tuple = np.dot(__UpperCamelCase ,self.vji.T ) - self.thre_bpa
lowercase_ : Dict = self.sig(__UpperCamelCase )
lowercase_ : int = np.dot(__UpperCamelCase ,self.wkj.T ) - self.thre_bpa
lowercase_ : int = self.sig(__UpperCamelCase )
# --------------Model Leaning ------------------------
# calculate error and gradient---------------
lowercase_ : str = np.multiply(
(data_teach - bp_outa) ,np.multiply(__UpperCamelCase ,(1 - bp_outa) ) )
lowercase_ : Optional[int] = np.multiply(
np.dot(__UpperCamelCase ,self.wkj ) ,np.multiply(__UpperCamelCase ,(1 - bp_outa) ) )
lowercase_ : Any = np.dot(__UpperCamelCase ,self.vji )
lowercase_ : str = pd_i_all / (self.size_poolinga * self.size_poolinga)
lowercase_ : Dict = pd_conva_pooled.T.getA().tolist()
lowercase_ : List[Any] = self._calculate_gradient_from_pool(
__UpperCamelCase ,__UpperCamelCase ,shape_featuremapa[0] ,shape_featuremapa[1] ,self.size_poolinga ,)
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
lowercase_ : Optional[Any] = self._expand_mat(pd_conva_all[k_conv] )
lowercase_ : Dict = self.rate_weight * np.dot(__UpperCamelCase ,__UpperCamelCase )
lowercase_ : List[Any] = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
lowercase_ : Dict = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
lowercase_ : Optional[int] = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
lowercase_ : Any = self.vji + pd_j_all.T * bp_outa * self.rate_weight
lowercase_ : str = self.thre_bpa - pd_k_all * self.rate_thre
lowercase_ : Any = self.thre_bpa - pd_j_all * self.rate_thre
# calculate the sum error of all single image
lowercase_ : List[Any] = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
lowercase_ : int = rp + 1
lowercase_ : Union[str, Any] = error_count / patterns
all_mse.append(__UpperCamelCase )
def draw_error():
lowercase_ : str = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(__UpperCamelCase ,'+-' )
plt.plot(__UpperCamelCase ,'r--' )
plt.xlabel('Learning Times' )
plt.ylabel('All_mse' )
plt.grid(__UpperCamelCase ,alpha=0.5 )
plt.show()
        print('------------------Training Completed---------------------' )
print((' - - Training epoch: ', rp, f''' - - Mse: {mse:.6f}''') )
if draw_e:
draw_error()
return mse
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
lowercase_ : Union[str, Any] = []
print('-------------------Start Testing-------------------------' )
print((' - - Shape: Test_Data ', np.shape(__UpperCamelCase )) )
for p in range(len(__UpperCamelCase ) ):
lowercase_ : List[Any] = np.asmatrix(datas_test[p] )
lowercase_ , lowercase_ : Optional[Any] = self.convolute(
__UpperCamelCase ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,)
lowercase_ : List[Any] = self.pooling(__UpperCamelCase ,self.size_poolinga )
lowercase_ : List[str] = self._expand(__UpperCamelCase )
lowercase_ : Any = data_bp_input
lowercase_ : Optional[Any] = bp_outa * self.vji.T - self.thre_bpa
lowercase_ : str = self.sig(__UpperCamelCase )
lowercase_ : List[str] = bp_outa * self.wkj.T - self.thre_bpa
lowercase_ : Optional[int] = self.sig(__UpperCamelCase )
produce_out.extend(bp_outa.getA().tolist() )
lowercase_ : List[str] = [list(map(self.do_round ,__UpperCamelCase ) ) for each in produce_out]
return np.asarray(__UpperCamelCase )
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[Any]:
'''simple docstring'''
lowercase_ : Optional[int] = np.asmatrix(__UpperCamelCase )
lowercase_ , lowercase_ : Union[str, Any] = self.convolute(
__UpperCamelCase ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,)
lowercase_ : Optional[int] = self.pooling(__UpperCamelCase ,self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
"""simple docstring"""
import math
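# A partition count k is "perfect" when 4k + 1 is the square of 2**(e + 1) - 1
# for some integer e (equivalently k = 2**e * (2**e - 1)); the helper below
# checks this by testing whether the derived exponent is an integer.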
def lowercase__( __SCREAMING_SNAKE_CASE : int ):
lowercase_ : List[str] = math.loga(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
return exponent == int(__SCREAMING_SNAKE_CASE )
def lowercase__( __SCREAMING_SNAKE_CASE : float = 1 / 1_23_45 ):
lowercase_ : Optional[Any] = 0
lowercase_ : Optional[int] = 0
lowercase_ : List[Any] = 3
while True:
lowercase_ : Dict = (integer**2 - 1) / 4
# if candidate is an integer, then there is a partition for k
if partition_candidate == int(__SCREAMING_SNAKE_CASE ):
lowercase_ : Any = int(__SCREAMING_SNAKE_CASE )
total_partitions += 1
if check_partition_perfect(__SCREAMING_SNAKE_CASE ):
perfect_partitions += 1
if perfect_partitions > 0:
if perfect_partitions / total_partitions < max_proportion:
return int(__SCREAMING_SNAKE_CASE )
integer += 1
if __name__ == "__main__":
print(F"{solution() = }")
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class UpperCamelCase ( unittest.TestCase ):
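    # Shared helper: benchmark results are keyed by batch size and sequence
    # length, so assert that every measured cell is populated.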
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]:
'''simple docstring'''
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['bs'] ,model_result['ss'] ):
lowercase_ : Dict = model_result['result'][batch_size][sequence_length]
self.assertIsNotNone(__UpperCamelCase )
def _UpperCAmelCase ( self ) -> int:
'''simple docstring'''
lowercase_ : int = 'sshleifer/tiny-gpt2'
lowercase_ : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,eager_mode=__UpperCamelCase ,multi_process=__UpperCamelCase ,)
lowercase_ : Union[str, Any] = TensorFlowBenchmark(__UpperCamelCase )
lowercase_ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
lowercase_ : List[str] = 'sgugger/tiny-distilbert-classification'
lowercase_ : Dict = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,only_pretrain_model=__UpperCamelCase ,)
lowercase_ : int = TensorFlowBenchmark(__UpperCamelCase )
lowercase_ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ : Any = 'sshleifer/tiny-gpt2'
lowercase_ : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,)
lowercase_ : Optional[Any] = TensorFlowBenchmark(__UpperCamelCase )
lowercase_ : int = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ : Dict = 'sshleifer/tiny-gpt2'
lowercase_ : Tuple = AutoConfig.from_pretrained(__UpperCamelCase )
lowercase_ : str = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,eager_mode=__UpperCamelCase ,multi_process=__UpperCamelCase ,)
lowercase_ : str = TensorFlowBenchmark(__UpperCamelCase ,[config] )
lowercase_ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
lowercase_ : Any = 'sshleifer/tiny-gpt2'
lowercase_ : Any = AutoConfig.from_pretrained(__UpperCamelCase )
lowercase_ : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,)
lowercase_ : int = TensorFlowBenchmark(__UpperCamelCase ,[config] )
lowercase_ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : int = 'sshleifer/tiny-gpt2'
lowercase_ : List[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,)
lowercase_ : List[str] = TensorFlowBenchmark(__UpperCamelCase )
lowercase_ : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
lowercase_ : List[str] = 'sshleifer/tiny-gpt2'
lowercase_ : Optional[int] = AutoConfig.from_pretrained(__UpperCamelCase )
lowercase_ : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,)
lowercase_ : str = TensorFlowBenchmark(__UpperCamelCase ,[config] )
lowercase_ : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
lowercase_ : str = 'patrickvonplaten/t5-tiny-random'
lowercase_ : int = AutoConfig.from_pretrained(__UpperCamelCase )
lowercase_ : Optional[int] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,)
lowercase_ : List[str] = TensorFlowBenchmark(__UpperCamelCase ,configs=[config] )
lowercase_ : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('GPU' ) ) == 0 ,'Cannot do xla on CPU.' )
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ : Optional[int] = 'sshleifer/tiny-gpt2'
lowercase_ : Union[str, Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,use_xla=__UpperCamelCase ,multi_process=__UpperCamelCase ,)
lowercase_ : Union[str, Any] = TensorFlowBenchmark(__UpperCamelCase )
lowercase_ : int = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
lowercase_ : List[str] = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase_ : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,inference=__UpperCamelCase ,save_to_csv=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,inference_time_csv_file=os.path.join(__UpperCamelCase ,'inf_time.csv' ) ,inference_memory_csv_file=os.path.join(__UpperCamelCase ,'inf_mem.csv' ) ,env_info_csv_file=os.path.join(__UpperCamelCase ,'env.csv' ) ,multi_process=__UpperCamelCase ,)
lowercase_ : List[str] = TensorFlowBenchmark(__UpperCamelCase )
benchmark.run()
self.assertTrue(Path(os.path.join(__UpperCamelCase ,'inf_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(__UpperCamelCase ,'inf_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(__UpperCamelCase ,'env.csv' ) ).exists() )
def _UpperCAmelCase ( self ) -> int:
'''simple docstring'''
lowercase_ : int = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(__UpperCamelCase ):
self.assertTrue(hasattr(__UpperCamelCase ,'sequential' ) )
self.assertTrue(hasattr(__UpperCamelCase ,'cumulative' ) )
self.assertTrue(hasattr(__UpperCamelCase ,'current' ) )
self.assertTrue(hasattr(__UpperCamelCase ,'total' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase_ : Dict = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,log_filename=os.path.join(__UpperCamelCase ,'log.txt' ) ,log_print=__UpperCamelCase ,trace_memory_line_by_line=__UpperCamelCase ,eager_mode=__UpperCamelCase ,multi_process=__UpperCamelCase ,)
lowercase_ : Dict = TensorFlowBenchmark(__UpperCamelCase )
lowercase_ : Any = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(__UpperCamelCase ,'log.txt' ) ).exists() )
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def lowercase__( __SCREAMING_SNAKE_CASE : Optional[int] ):
# encoder.embeddings are double copied in original FLAVA
return sum(param.float().sum() if 'encoder.embeddings' not in key else 0 for key, param in state_dict.items() )
def lowercase__( __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[Any] ):
lowercase_ : List[Any] = {}
for key, value in state_dict.items():
if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
continue
lowercase_ : List[str] = key.replace('heads.cmd.mim_head.cls.predictions' , 'mmm_image_head' )
lowercase_ : Any = key.replace('heads.cmd.mlm_head.cls.predictions' , 'mmm_text_head' )
lowercase_ : Union[str, Any] = key.replace('heads.cmd.itm_head.cls' , 'itm_head' )
lowercase_ : Optional[int] = key.replace('heads.cmd.itm_head.pooler' , 'itm_head.pooler' )
lowercase_ : str = key.replace('heads.cmd.clip_head.logit_scale' , 'flava.logit_scale' )
lowercase_ : Any = key.replace('heads.fairseq_mlm.cls.predictions' , 'mlm_head' )
lowercase_ : List[str] = key.replace('heads.imagenet.mim_head.cls.predictions' , 'mim_head' )
lowercase_ : List[Any] = key.replace('mm_text_projection' , 'flava.text_to_mm_projection' )
lowercase_ : str = key.replace('mm_image_projection' , 'flava.image_to_mm_projection' )
lowercase_ : Optional[Any] = key.replace('image_encoder.module' , 'flava.image_model' )
lowercase_ : Optional[int] = key.replace('text_encoder.module' , 'flava.text_model' )
lowercase_ : Any = key.replace('mm_encoder.module.encoder.cls_token' , 'flava.multimodal_model.cls_token' )
lowercase_ : Tuple = key.replace('mm_encoder.module' , 'flava.multimodal_model' )
lowercase_ : Any = key.replace('text_projection' , 'flava.text_projection' )
lowercase_ : Optional[int] = key.replace('image_projection' , 'flava.image_projection' )
lowercase_ : Union[str, Any] = value.float()
for key, value in codebook_state_dict.items():
lowercase_ : Optional[Any] = value
return upgrade
@torch.no_grad()
def lowercase__( __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any]=None ):
if config_path is not None:
lowercase_ : Union[str, Any] = FlavaConfig.from_pretrained(__SCREAMING_SNAKE_CASE )
else:
lowercase_ : Dict = FlavaConfig()
lowercase_ : int = FlavaForPreTraining(__SCREAMING_SNAKE_CASE ).eval()
lowercase_ : Dict = convert_dalle_checkpoint(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , save_checkpoint=__SCREAMING_SNAKE_CASE )
if os.path.exists(__SCREAMING_SNAKE_CASE ):
lowercase_ : Dict = torch.load(__SCREAMING_SNAKE_CASE , map_location='cpu' )
else:
lowercase_ : List[str] = torch.hub.load_state_dict_from_url(__SCREAMING_SNAKE_CASE , map_location='cpu' )
lowercase_ : Any = upgrade_state_dict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
hf_model.load_state_dict(__SCREAMING_SNAKE_CASE )
lowercase_ : Union[str, Any] = hf_model.state_dict()
lowercase_ : Optional[Any] = count_parameters(__SCREAMING_SNAKE_CASE )
lowercase_ : Any = count_parameters(__SCREAMING_SNAKE_CASE ) + count_parameters(__SCREAMING_SNAKE_CASE )
assert torch.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1E-3 )
hf_model.save_pretrained(__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
parser.add_argument("--codebook_path", default=None, type=str, help="Path to flava codebook checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
__SCREAMING_SNAKE_CASE =parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
class UpperCamelCase ( lowercase_ ):
lowercase = ['input_values', 'padding_mask']
def __init__( self ,__UpperCamelCase = 1 ,__UpperCamelCase = 2_4000 ,__UpperCamelCase = 0.0 ,__UpperCamelCase = None ,__UpperCamelCase = None ,**__UpperCamelCase ,) -> Any:
'''simple docstring'''
super().__init__(feature_size=__UpperCamelCase ,sampling_rate=__UpperCamelCase ,padding_value=__UpperCamelCase ,**__UpperCamelCase )
lowercase_ : List[str] = chunk_length_s
lowercase_ : Tuple = overlap
@property
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 ,int((1.0 - self.overlap) * self.chunk_length ) )
def __call__( self ,__UpperCamelCase ,__UpperCamelCase = None ,__UpperCamelCase = False ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,) -> BatchFeature:
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
f''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'''
f''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
if padding and truncation:
raise ValueError('Both padding and truncation were set. Make sure you only set one.' )
elif padding is None:
# by default let's pad the inputs
lowercase_ : Optional[int] = True
lowercase_ : Optional[int] = bool(
isinstance(__UpperCamelCase ,(list, tuple) ) and (isinstance(raw_audio[0] ,(np.ndarray, tuple, list) )) )
if is_batched:
lowercase_ : int = [np.asarray(__UpperCamelCase ,dtype=np.floataa ).T for audio in raw_audio]
elif not is_batched and not isinstance(__UpperCamelCase ,np.ndarray ):
lowercase_ : Any = np.asarray(__UpperCamelCase ,dtype=np.floataa )
elif isinstance(__UpperCamelCase ,np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ):
lowercase_ : List[str] = raw_audio.astype(np.floataa )
# always return batch
if not is_batched:
lowercase_ : Dict = [np.asarray(__UpperCamelCase ).T]
# verify inputs are valid
for idx, example in enumerate(__UpperCamelCase ):
if example.ndim > 2:
raise ValueError(f'''Expected input shape (channels, length) but got shape {example.shape}''' )
if self.feature_size == 1 and example.ndim != 1:
raise ValueError(f'''Expected mono audio but example has {example.shape[-1]} channels''' )
if self.feature_size == 2 and example.shape[-1] != 2:
raise ValueError(f'''Expected stereo audio but example has {example.shape[-1]} channels''' )
lowercase_ : Optional[int] = None
lowercase_ : List[Any] = BatchFeature({'input_values': raw_audio} )
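        # When chunking is configured and no explicit max_length was given,
        # derive one from chunk_length/chunk_stride so every clip is truncated
        # or padded to a whole number of strides.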
if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
if truncation:
lowercase_ : List[Any] = min(array.shape[0] for array in raw_audio )
lowercase_ : int = int(np.floor(max_length / self.chunk_stride ) )
lowercase_ : Dict = (nb_step - 1) * self.chunk_stride + self.chunk_length
elif padding:
lowercase_ : List[Any] = max(array.shape[0] for array in raw_audio )
lowercase_ : Tuple = int(np.ceil(max_length / self.chunk_stride ) )
lowercase_ : List[str] = (nb_step - 1) * self.chunk_stride + self.chunk_length
lowercase_ : Union[str, Any] = 'max_length'
else:
lowercase_ : int = input_values
# normal padding on batch
if padded_inputs is None:
lowercase_ : int = self.pad(
__UpperCamelCase ,max_length=__UpperCamelCase ,truncation=__UpperCamelCase ,padding=__UpperCamelCase ,return_attention_mask=__UpperCamelCase ,)
if padding:
lowercase_ : Optional[int] = padded_inputs.pop('attention_mask' )
lowercase_ : Dict = []
for example in padded_inputs.pop('input_values' ):
if self.feature_size == 1:
lowercase_ : Optional[int] = example[..., None]
input_values.append(example.T )
lowercase_ : str = input_values
if return_tensors is not None:
lowercase_ : List[Any] = padded_inputs.convert_to_tensors(__UpperCamelCase )
return padded_inputs
"""simple docstring"""
from __future__ import annotations
def lowercase__( __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int ):
if (direction == 1 and array[indexa] > array[indexa]) or (
direction == 0 and array[indexa] < array[indexa]
):
lowercase_ , lowercase_ : Dict = array[indexa], array[indexa]
def lowercase__( __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int ):
if length > 1:
lowercase_ : List[str] = int(length / 2 )
for i in range(__SCREAMING_SNAKE_CASE , low + middle ):
comp_and_swap(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , i + middle , __SCREAMING_SNAKE_CASE )
bitonic_merge(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
bitonic_merge(__SCREAMING_SNAKE_CASE , low + middle , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
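# bitonic_sort builds a bitonic sequence by sorting the first half ascending
# and the second half descending, then orders the whole range with
# bitonic_merge; direction 1 sorts ascending, 0 sorts descending.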
def lowercase__( __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int ):
if length > 1:
lowercase_ : Optional[Any] = int(length / 2 )
bitonic_sort(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , 1 )
bitonic_sort(__SCREAMING_SNAKE_CASE , low + middle , __SCREAMING_SNAKE_CASE , 0 )
bitonic_merge(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE =input("Enter numbers separated by a comma:\n").strip()
__SCREAMING_SNAKE_CASE =[int(item.strip()) for item in user_input.split(",")]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print("\nSorted array in ascending order is: ", end="")
print(*unsorted, sep=", ")
bitonic_merge(unsorted, 0, len(unsorted), 0)
print("Sorted array in descending order is: ", end="")
print(*unsorted, sep=", ")
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__SCREAMING_SNAKE_CASE ={"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE =[
"MRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"MraForMaskedLM",
"MraForMultipleChoice",
"MraForQuestionAnswering",
"MraForSequenceClassification",
"MraForTokenClassification",
"MraLayer",
"MraModel",
"MraPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE =_LazyModule(__name__, globals()["__file__"], _import_structure)
"""simple docstring"""
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def lowercase__( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any]=1 ):
if n_shave_prefix_segments >= 0:
return ".".join(path.split('.' )[n_shave_prefix_segments:] )
else:
return ".".join(path.split('.' )[:n_shave_prefix_segments] )
def lowercase__( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[str]=0 ):
lowercase_ : Dict = []
for old_item in old_list:
lowercase_ : Union[str, Any] = old_item.replace('in_layers.0' , 'norm1' )
lowercase_ : Any = new_item.replace('in_layers.2' , 'conv1' )
lowercase_ : Union[str, Any] = new_item.replace('out_layers.0' , 'norm2' )
lowercase_ : Any = new_item.replace('out_layers.3' , 'conv2' )
lowercase_ : Tuple = new_item.replace('emb_layers.1' , 'time_emb_proj' )
lowercase_ : Any = new_item.replace('skip_connection' , 'conv_shortcut' )
lowercase_ : List[Any] = shave_segments(__SCREAMING_SNAKE_CASE , n_shave_prefix_segments=__SCREAMING_SNAKE_CASE )
mapping.append({'old': old_item, 'new': new_item} )
return mapping
def lowercase__( __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[Any]=0 ):
lowercase_ : List[Any] = []
for old_item in old_list:
lowercase_ : Dict = old_item
lowercase_ : Any = new_item.replace('norm.weight' , 'group_norm.weight' )
lowercase_ : List[str] = new_item.replace('norm.bias' , 'group_norm.bias' )
lowercase_ : str = new_item.replace('proj_out.weight' , 'proj_attn.weight' )
lowercase_ : Optional[int] = new_item.replace('proj_out.bias' , 'proj_attn.bias' )
lowercase_ : Tuple = shave_segments(__SCREAMING_SNAKE_CASE , n_shave_prefix_segments=__SCREAMING_SNAKE_CASE )
mapping.append({'old': old_item, 'new': new_item} )
return mapping
def lowercase__( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Tuple=None , __SCREAMING_SNAKE_CASE : Any=None , __SCREAMING_SNAKE_CASE : Any=None ):
assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
lowercase_ : int = old_checkpoint[path]
lowercase_ : str = old_tensor.shape[0] // 3
lowercase_ : Union[str, Any] = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
lowercase_ : List[Any] = old_tensor.shape[0] // config['num_head_channels'] // 3
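            # Reshape the fused qkv tensor to (heads, 3 * channels_per_head, ...)
            # so it can be split into equal query / key / value chunks along dim 1.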
lowercase_ : List[str] = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
lowercase_ , lowercase_ , lowercase_ : List[str] = old_tensor.split(channels // num_heads , dim=1 )
lowercase_ : str = query.reshape(__SCREAMING_SNAKE_CASE )
lowercase_ : int = key.reshape(__SCREAMING_SNAKE_CASE )
lowercase_ : Tuple = value.reshape(__SCREAMING_SNAKE_CASE )
for path in paths:
lowercase_ : Dict = path['new']
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
lowercase_ : Union[str, Any] = new_path.replace('middle_block.0' , 'mid_block.resnets.0' )
lowercase_ : Optional[Any] = new_path.replace('middle_block.1' , 'mid_block.attentions.0' )
lowercase_ : Optional[int] = new_path.replace('middle_block.2' , 'mid_block.resnets.1' )
if additional_replacements is not None:
for replacement in additional_replacements:
lowercase_ : List[str] = new_path.replace(replacement['old'] , replacement['new'] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
lowercase_ : Optional[Any] = old_checkpoint[path['old']][:, :, 0]
else:
lowercase_ : List[str] = old_checkpoint[path['old']]
def lowercase__( __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any] ):
lowercase_ : Tuple = {}
lowercase_ : List[Any] = checkpoint['time_embed.0.weight']
lowercase_ : int = checkpoint['time_embed.0.bias']
lowercase_ : List[str] = checkpoint['time_embed.2.weight']
lowercase_ : int = checkpoint['time_embed.2.bias']
lowercase_ : Optional[Any] = checkpoint['input_blocks.0.0.weight']
lowercase_ : Tuple = checkpoint['input_blocks.0.0.bias']
lowercase_ : List[Any] = checkpoint['out.0.weight']
lowercase_ : Dict = checkpoint['out.0.bias']
lowercase_ : Any = checkpoint['out.2.weight']
lowercase_ : List[str] = checkpoint['out.2.bias']
# Retrieves the keys for the input blocks only
lowercase_ : List[Any] = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'input_blocks' in layer} )
lowercase_ : Union[str, Any] = {
layer_id: [key for key in checkpoint if F'''input_blocks.{layer_id}''' in key]
for layer_id in range(__SCREAMING_SNAKE_CASE )
}
# Retrieves the keys for the middle blocks only
lowercase_ : Optional[Any] = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'middle_block' in layer} )
lowercase_ : Tuple = {
layer_id: [key for key in checkpoint if F'''middle_block.{layer_id}''' in key]
for layer_id in range(__SCREAMING_SNAKE_CASE )
}
# Retrieves the keys for the output blocks only
lowercase_ : List[str] = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'output_blocks' in layer} )
lowercase_ : Any = {
layer_id: [key for key in checkpoint if F'''output_blocks.{layer_id}''' in key]
for layer_id in range(__SCREAMING_SNAKE_CASE )
}
for i in range(1 , __SCREAMING_SNAKE_CASE ):
lowercase_ : Any = (i - 1) // (config['num_res_blocks'] + 1)
lowercase_ : Any = (i - 1) % (config['num_res_blocks'] + 1)
lowercase_ : Any = [key for key in input_blocks[i] if F'''input_blocks.{i}.0''' in key]
lowercase_ : int = [key for key in input_blocks[i] if F'''input_blocks.{i}.1''' in key]
if F'''input_blocks.{i}.0.op.weight''' in checkpoint:
lowercase_ : List[str] = checkpoint[
F'''input_blocks.{i}.0.op.weight'''
]
lowercase_ : Optional[int] = checkpoint[
F'''input_blocks.{i}.0.op.bias'''
]
continue
lowercase_ : Tuple = renew_resnet_paths(__SCREAMING_SNAKE_CASE )
lowercase_ : int = {'old': F'''input_blocks.{i}.0''', 'new': F'''down_blocks.{block_id}.resnets.{layer_in_block_id}'''}
lowercase_ : List[str] = {'old': 'resnets.2.op', 'new': 'downsamplers.0.op'}
assign_to_checkpoint(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , additional_replacements=[meta_path, resnet_op] , config=__SCREAMING_SNAKE_CASE )
if len(__SCREAMING_SNAKE_CASE ):
lowercase_ : Dict = renew_attention_paths(__SCREAMING_SNAKE_CASE )
lowercase_ : Tuple = {
'old': F'''input_blocks.{i}.1''',
'new': F'''down_blocks.{block_id}.attentions.{layer_in_block_id}''',
}
lowercase_ : int = {
F'''input_blocks.{i}.1.qkv.bias''': {
'key': F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''',
'query': F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''',
'value': F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''',
},
F'''input_blocks.{i}.1.qkv.weight''': {
'key': F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''',
'query': F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''',
'value': F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''',
},
}
assign_to_checkpoint(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , additional_replacements=[meta_path] , attention_paths_to_split=__SCREAMING_SNAKE_CASE , config=__SCREAMING_SNAKE_CASE , )
lowercase_ : Tuple = middle_blocks[0]
lowercase_ : Optional[int] = middle_blocks[1]
lowercase_ : Union[str, Any] = middle_blocks[2]
lowercase_ : List[str] = renew_resnet_paths(__SCREAMING_SNAKE_CASE )
assign_to_checkpoint(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , config=__SCREAMING_SNAKE_CASE )
lowercase_ : Any = renew_resnet_paths(__SCREAMING_SNAKE_CASE )
assign_to_checkpoint(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , config=__SCREAMING_SNAKE_CASE )
lowercase_ : List[str] = renew_attention_paths(__SCREAMING_SNAKE_CASE )
lowercase_ : Dict = {
'middle_block.1.qkv.bias': {
'key': 'mid_block.attentions.0.key.bias',
'query': 'mid_block.attentions.0.query.bias',
'value': 'mid_block.attentions.0.value.bias',
},
'middle_block.1.qkv.weight': {
'key': 'mid_block.attentions.0.key.weight',
'query': 'mid_block.attentions.0.query.weight',
'value': 'mid_block.attentions.0.value.weight',
},
}
assign_to_checkpoint(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , attention_paths_to_split=__SCREAMING_SNAKE_CASE , config=__SCREAMING_SNAKE_CASE )
for i in range(__SCREAMING_SNAKE_CASE ):
lowercase_ : Union[str, Any] = i // (config['num_res_blocks'] + 1)
lowercase_ : str = i % (config['num_res_blocks'] + 1)
lowercase_ : Optional[int] = [shave_segments(__SCREAMING_SNAKE_CASE , 2 ) for name in output_blocks[i]]
lowercase_ : Union[str, Any] = {}
for layer in output_block_layers:
lowercase_ , lowercase_ : str = layer.split('.' )[0], shave_segments(__SCREAMING_SNAKE_CASE , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(__SCREAMING_SNAKE_CASE )
else:
lowercase_ : str = [layer_name]
if len(__SCREAMING_SNAKE_CASE ) > 1:
lowercase_ : Dict = [key for key in output_blocks[i] if F'''output_blocks.{i}.0''' in key]
lowercase_ : Dict = [key for key in output_blocks[i] if F'''output_blocks.{i}.1''' in key]
lowercase_ : Optional[int] = renew_resnet_paths(__SCREAMING_SNAKE_CASE )
lowercase_ : List[Any] = renew_resnet_paths(__SCREAMING_SNAKE_CASE )
lowercase_ : int = {'old': F'''output_blocks.{i}.0''', 'new': F'''up_blocks.{block_id}.resnets.{layer_in_block_id}'''}
assign_to_checkpoint(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , additional_replacements=[meta_path] , config=__SCREAMING_SNAKE_CASE )
if ["conv.weight", "conv.bias"] in output_block_list.values():
lowercase_ : Any = list(output_block_list.values() ).index(['conv.weight', 'conv.bias'] )
lowercase_ : List[Any] = checkpoint[
F'''output_blocks.{i}.{index}.conv.weight'''
]
lowercase_ : Optional[int] = checkpoint[
F'''output_blocks.{i}.{index}.conv.bias'''
]
# Clear attentions as they have been attributed above.
if len(__SCREAMING_SNAKE_CASE ) == 2:
lowercase_ : Optional[Any] = []
if len(__SCREAMING_SNAKE_CASE ):
lowercase_ : Dict = renew_attention_paths(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = {
'old': F'''output_blocks.{i}.1''',
'new': F'''up_blocks.{block_id}.attentions.{layer_in_block_id}''',
}
lowercase_ : Optional[Any] = {
F'''output_blocks.{i}.1.qkv.bias''': {
'key': F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''',
'query': F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''',
'value': F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''',
},
F'''output_blocks.{i}.1.qkv.weight''': {
'key': F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''',
'query': F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''',
'value': F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''',
},
}
assign_to_checkpoint(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('qkv' in key for key in attentions ) else None , config=__SCREAMING_SNAKE_CASE , )
else:
lowercase_ : Dict = renew_resnet_paths(__SCREAMING_SNAKE_CASE , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
lowercase_ : Optional[int] = '.'.join(['output_blocks', str(__SCREAMING_SNAKE_CASE ), path['old']] )
lowercase_ : Union[str, Any] = '.'.join(['up_blocks', str(__SCREAMING_SNAKE_CASE ), 'resnets', str(__SCREAMING_SNAKE_CASE ), path['new']] )
lowercase_ : List[str] = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the architecture.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
__SCREAMING_SNAKE_CASE =parser.parse_args()
__SCREAMING_SNAKE_CASE =torch.load(args.checkpoint_path)
with open(args.config_file) as f:
__SCREAMING_SNAKE_CASE =json.loads(f.read())
__SCREAMING_SNAKE_CASE =convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
__SCREAMING_SNAKE_CASE =UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
__SCREAMING_SNAKE_CASE =DDPMScheduler.from_config("/".join(args.checkpoint_path.split("/")[:-1]))
__SCREAMING_SNAKE_CASE =VQModel.from_pretrained("/".join(args.checkpoint_path.split("/")[:-1]))
__SCREAMING_SNAKE_CASE =LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
"""simple docstring"""
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
__SCREAMING_SNAKE_CASE ="python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("importlib_metadata")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def lowercase__( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str=None ):
require_version(deps[pkg] , __SCREAMING_SNAKE_CASE )
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
__SCREAMING_SNAKE_CASE =TypeVar("KEY")
__SCREAMING_SNAKE_CASE =TypeVar("VAL")
@dataclass(frozen=lowercase_ , slots=lowercase_ )
class UpperCamelCase ( Generic[KEY, VAL] ):
lowercase = 42
lowercase = 42
class UpperCamelCase ( _Item ):
def __init__( self ) -> None:
'''simple docstring'''
        super().__init__(None ,None )
def __bool__( self ) -> bool:
'''simple docstring'''
return False
__SCREAMING_SNAKE_CASE =_DeletedItem()
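# Open-addressed hash map with linear probing. Deleted slots are replaced by
# the _deleted sentinel so probe chains stay unbroken; the table doubles when
# the load factor exceeds capacity_factor and halves when it becomes sparse.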
class UpperCamelCase ( MutableMapping[KEY, VAL] ):
def __init__( self ,__UpperCamelCase = 8 ,__UpperCamelCase = 0.75 ) -> None:
'''simple docstring'''
lowercase_ : Optional[Any] = initial_block_size
lowercase_ : list[_Item | None] = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
lowercase_ : Tuple = capacity_factor
lowercase_ : Optional[int] = 0
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> int:
'''simple docstring'''
return hash(__UpperCamelCase ) % len(self._buckets )
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> int:
'''simple docstring'''
return (ind + 1) % len(self._buckets )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> bool:
'''simple docstring'''
lowercase_ : Tuple = self._buckets[ind]
if not stored:
lowercase_ : List[str] = _Item(__UpperCamelCase ,__UpperCamelCase )
self._len += 1
return True
elif stored.key == key:
lowercase_ : Tuple = _Item(__UpperCamelCase ,__UpperCamelCase )
return True
else:
return False
def _UpperCAmelCase ( self ) -> bool:
'''simple docstring'''
lowercase_ : Any = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(__UpperCamelCase )
def _UpperCAmelCase ( self ) -> bool:
'''simple docstring'''
if len(self._buckets ) <= self._initial_block_size:
return False
lowercase_ : Union[str, Any] = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> None:
'''simple docstring'''
lowercase_ : int = self._buckets
lowercase_ : Tuple = [None] * new_size
lowercase_ : Union[str, Any] = 0
for item in old_buckets:
if item:
self._add_item(item.key ,item.val )
def _UpperCAmelCase ( self ) -> None:
'''simple docstring'''
self._resize(len(self._buckets ) * 2 )
def _UpperCAmelCase ( self ) -> None:
'''simple docstring'''
self._resize(len(self._buckets ) // 2 )
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Iterator[int]:
'''simple docstring'''
lowercase_ : str = self._get_bucket_index(__UpperCamelCase )
for _ in range(len(self._buckets ) ):
yield ind
lowercase_ : List[str] = self._get_next_ind(__UpperCamelCase )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> None:
'''simple docstring'''
for ind in self._iterate_buckets(__UpperCamelCase ):
if self._try_set(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ):
break
def __setitem__( self ,__UpperCamelCase ,__UpperCamelCase ) -> None:
'''simple docstring'''
if self._is_full():
self._size_up()
self._add_item(__UpperCamelCase ,__UpperCamelCase )
def __delitem__( self ,__UpperCamelCase ) -> None:
'''simple docstring'''
for ind in self._iterate_buckets(__UpperCamelCase ):
lowercase_ : Dict = self._buckets[ind]
if item is None:
raise KeyError(__UpperCamelCase )
if item is _deleted:
continue
if item.key == key:
lowercase_ : str = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self ,__UpperCamelCase ) -> VAL:
'''simple docstring'''
for ind in self._iterate_buckets(__UpperCamelCase ):
lowercase_ : List[Any] = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(__UpperCamelCase )
def __len__( self ) -> int:
'''simple docstring'''
return self._len
def __iter__( self ) -> Iterator[KEY]:
'''simple docstring'''
yield from (item.key for item in self._buckets if item)
def __repr__( self ) -> str:
'''simple docstring'''
lowercase_ : str = ' ,'.join(
f'''{item.key}: {item.val}''' for item in self._buckets if item )
return f'''HashMap({val_string})'''
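if __name__ == "__main__":
    # Minimal usage sketch (an illustrative assumption, not part of the
    # original module): set, delete and read back entries to exercise linear
    # probing and the deletion sentinel.
    hm = HashMap()
    hm["apple"] = 1
    hm["banana"] = 2
    del hm["apple"]
    print(hm)  # -> HashMap(banana: 2)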
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
def lowercase__( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Any=False , __SCREAMING_SNAKE_CASE : Dict=False ):
lowercase_ : int = 'backbone.' if is_semantic else ''
lowercase_ : List[str] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''{prefix}blocks.{i}.norm1.weight''', F'''beit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''{prefix}blocks.{i}.norm1.bias''', F'''beit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(F'''{prefix}blocks.{i}.attn.proj.weight''', F'''beit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(F'''{prefix}blocks.{i}.attn.proj.bias''', F'''beit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''{prefix}blocks.{i}.norm2.weight''', F'''beit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''{prefix}blocks.{i}.norm2.bias''', F'''beit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc1.weight''', F'''beit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc1.bias''', F'''beit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc2.weight''', F'''beit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc2.bias''', F'''beit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
(F'''{prefix}cls_token''', 'beit.embeddings.cls_token'),
(F'''{prefix}patch_embed.proj.weight''', 'beit.embeddings.patch_embeddings.projection.weight'),
(F'''{prefix}patch_embed.proj.bias''', 'beit.embeddings.patch_embeddings.projection.bias'),
(F'''{prefix}pos_embed''', 'beit.embeddings.position_embeddings'),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
('mask_token', 'beit.embeddings.mask_token'),
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
('fc_norm.weight', 'beit.pooler.layernorm.weight'),
('fc_norm.bias', 'beit.pooler.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[str]=False , __SCREAMING_SNAKE_CASE : List[Any]=False ):
for i in range(config.num_hidden_layers ):
lowercase_ : Any = 'backbone.' if is_semantic else ''
# queries, keys and values
lowercase_ : List[Any] = state_dict.pop(F'''{prefix}blocks.{i}.attn.qkv.weight''' )
lowercase_ : List[Any] = state_dict.pop(F'''{prefix}blocks.{i}.attn.q_bias''' )
lowercase_ : int = state_dict.pop(F'''{prefix}blocks.{i}.attn.v_bias''' )
lowercase_ : List[str] = in_proj_weight[
: config.hidden_size, :
]
lowercase_ : List[str] = q_bias
lowercase_ : List[str] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowercase_ : Any = in_proj_weight[
-config.hidden_size :, :
]
lowercase_ : Any = v_bias
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
lowercase_ : Any = state_dict.pop(F'''{prefix}blocks.{i}.gamma_1''' )
lowercase_ : int = state_dict.pop(F'''{prefix}blocks.{i}.gamma_2''' )
lowercase_ : Tuple = gamma_a
lowercase_ : List[Any] = gamma_a
def lowercase__( __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any ):
lowercase_ : List[Any] = dct.pop(__SCREAMING_SNAKE_CASE )
lowercase_ : Any = val
def lowercase__( ):
lowercase_ : List[str] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
lowercase_ : Any = Image.open(requests.get(__SCREAMING_SNAKE_CASE , stream=__SCREAMING_SNAKE_CASE ).raw )
return im
@torch.no_grad()
def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Any=False ):
lowercase_ : List[str] = False if 'rvlcdip' in checkpoint_url else True
lowercase_ : Dict = BeitConfig(use_absolute_position_embeddings=__SCREAMING_SNAKE_CASE , use_mask_token=__SCREAMING_SNAKE_CASE )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
lowercase_ : Any = 10_24
lowercase_ : List[str] = 40_96
lowercase_ : Tuple = 24
lowercase_ : Union[str, Any] = 16
# labels
if "rvlcdip" in checkpoint_url:
lowercase_ : Optional[Any] = 16
lowercase_ : Any = 'huggingface/label-files'
lowercase_ : int = 'rvlcdip-id2label.json'
lowercase_ : Optional[int] = json.load(open(hf_hub_download(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) )
lowercase_ : Dict = {int(__SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
lowercase_ : str = idalabel
lowercase_ : str = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
lowercase_ : Dict = torch.hub.load_state_dict_from_url(__SCREAMING_SNAKE_CASE , map_location='cpu' )['model']
lowercase_ : Optional[Any] = create_rename_keys(__SCREAMING_SNAKE_CASE , has_lm_head=__SCREAMING_SNAKE_CASE )
for src, dest in rename_keys:
rename_key(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
read_in_q_k_v(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , has_lm_head=__SCREAMING_SNAKE_CASE )
# load HuggingFace model
lowercase_ : Optional[int] = BeitForMaskedImageModeling(__SCREAMING_SNAKE_CASE ) if has_lm_head else BeitForImageClassification(__SCREAMING_SNAKE_CASE )
model.eval()
model.load_state_dict(__SCREAMING_SNAKE_CASE )
# Check outputs on an image
lowercase_ : List[Any] = BeitImageProcessor(
size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=__SCREAMING_SNAKE_CASE )
lowercase_ : str = prepare_img()
lowercase_ : Optional[Any] = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='pt' )
lowercase_ : int = encoding['pixel_values']
lowercase_ : Any = model(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = outputs.logits
# verify logits
lowercase_ : Optional[Any] = [1, 16] if 'rvlcdip' in checkpoint_url else [1, 1_96, 81_92]
assert logits.shape == torch.Size(__SCREAMING_SNAKE_CASE ), "Shape of logits not as expected"
Path(__SCREAMING_SNAKE_CASE ).mkdir(exist_ok=__SCREAMING_SNAKE_CASE )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(__SCREAMING_SNAKE_CASE )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__SCREAMING_SNAKE_CASE )
if push_to_hub:
if has_lm_head:
lowercase_ : List[str] = 'dit-base' if 'base' in checkpoint_url else 'dit-large'
else:
lowercase_ : List[str] = 'dit-base-finetuned-rvlcdip' if 'dit-b' in checkpoint_url else 'dit-large-finetuned-rvlcdip'
image_processor.push_to_hub(
repo_path_or_name=Path(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=__SCREAMING_SNAKE_CASE , )
model.push_to_hub(
repo_path_or_name=Path(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=__SCREAMING_SNAKE_CASE , )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
type=str,
help="URL to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
__SCREAMING_SNAKE_CASE =parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
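# Example invocation of the script above (the file name is a hypothetical
# stand-in; the flags are exactly the ones registered with argparse):
#   python convert_dit_to_pytorch.py \
#       --checkpoint_url https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth \
#       --pytorch_dump_folder_path ./dit-base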
| 321
| 1
|
"""simple docstring"""
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE ={
"b0": efficientnet.EfficientNetBa,
"b1": efficientnet.EfficientNetBa,
"b2": efficientnet.EfficientNetBa,
"b3": efficientnet.EfficientNetBa,
"b4": efficientnet.EfficientNetBa,
"b5": efficientnet.EfficientNetBa,
"b6": efficientnet.EfficientNetBa,
"b7": efficientnet.EfficientNetBa,
}
__SCREAMING_SNAKE_CASE ={
"b0": {
"hidden_dim": 1280,
"width_coef": 1.0,
"depth_coef": 1.0,
"image_size": 224,
"dropout_rate": 0.2,
"dw_padding": [],
},
"b1": {
"hidden_dim": 1280,
"width_coef": 1.0,
"depth_coef": 1.1,
"image_size": 240,
"dropout_rate": 0.2,
"dw_padding": [16],
},
"b2": {
"hidden_dim": 1408,
"width_coef": 1.1,
"depth_coef": 1.2,
"image_size": 260,
"dropout_rate": 0.3,
"dw_padding": [5, 8, 16],
},
"b3": {
"hidden_dim": 1536,
"width_coef": 1.2,
"depth_coef": 1.4,
"image_size": 300,
"dropout_rate": 0.3,
"dw_padding": [5, 18],
},
"b4": {
"hidden_dim": 1792,
"width_coef": 1.4,
"depth_coef": 1.8,
"image_size": 380,
"dropout_rate": 0.4,
"dw_padding": [6],
},
"b5": {
"hidden_dim": 2048,
"width_coef": 1.6,
"depth_coef": 2.2,
"image_size": 456,
"dropout_rate": 0.4,
"dw_padding": [13, 27],
},
"b6": {
"hidden_dim": 2304,
"width_coef": 1.8,
"depth_coef": 2.6,
"image_size": 528,
"dropout_rate": 0.5,
"dw_padding": [31],
},
"b7": {
"hidden_dim": 2560,
"width_coef": 2.0,
"depth_coef": 3.1,
"image_size": 600,
"dropout_rate": 0.5,
"dw_padding": [18],
},
}
def lowercase__( __SCREAMING_SNAKE_CASE : List[str] ):
lowercase_ : str = EfficientNetConfig()
lowercase_ : Optional[int] = CONFIG_MAP[model_name]['hidden_dim']
lowercase_ : Any = CONFIG_MAP[model_name]['width_coef']
lowercase_ : Optional[int] = CONFIG_MAP[model_name]['depth_coef']
lowercase_ : List[Any] = CONFIG_MAP[model_name]['image_size']
lowercase_ : List[str] = CONFIG_MAP[model_name]['dropout_rate']
lowercase_ : List[Any] = CONFIG_MAP[model_name]['dw_padding']
lowercase_ : int = 'huggingface/label-files'
lowercase_ : Any = 'imagenet-1k-id2label.json'
lowercase_ : Union[str, Any] = 10_00
lowercase_ : str = json.load(open(hf_hub_download(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) )
lowercase_ : List[str] = {int(__SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
lowercase_ : Dict = idalabel
lowercase_ : Dict = {v: k for k, v in idalabel.items()}
return config
def lowercase__( ):
lowercase_ : List[str] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
lowercase_ : Optional[int] = Image.open(requests.get(__SCREAMING_SNAKE_CASE , stream=__SCREAMING_SNAKE_CASE ).raw )
return im
def lowercase__( __SCREAMING_SNAKE_CASE : int ):
lowercase_ : Tuple = CONFIG_MAP[model_name]['image_size']
lowercase_ : Any = EfficientNetImageProcessor(
size={'height': size, 'width': size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.4785_3944, 0.473_2864, 0.4743_4163] , do_center_crop=__SCREAMING_SNAKE_CASE , )
return preprocessor
def lowercase__( __SCREAMING_SNAKE_CASE : int ):
lowercase_ : List[Any] = [v.split('_' )[0].split('block' )[1] for v in original_param_names if v.startswith('block' )]
lowercase_ : str = sorted(set(__SCREAMING_SNAKE_CASE ) )
lowercase_ : int = len(__SCREAMING_SNAKE_CASE )
lowercase_ : Tuple = {b: str(__SCREAMING_SNAKE_CASE ) for b, i in zip(__SCREAMING_SNAKE_CASE , range(__SCREAMING_SNAKE_CASE ) )}
lowercase_ : str = []
rename_keys.append(('stem_conv/kernel:0', 'embeddings.convolution.weight') )
rename_keys.append(('stem_bn/gamma:0', 'embeddings.batchnorm.weight') )
rename_keys.append(('stem_bn/beta:0', 'embeddings.batchnorm.bias') )
rename_keys.append(('stem_bn/moving_mean:0', 'embeddings.batchnorm.running_mean') )
rename_keys.append(('stem_bn/moving_variance:0', 'embeddings.batchnorm.running_var') )
for b in block_names:
lowercase_ : Any = block_name_mapping[b]
rename_keys.append((F'''block{b}_expand_conv/kernel:0''', F'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') )
rename_keys.append((F'''block{b}_expand_bn/gamma:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') )
rename_keys.append((F'''block{b}_expand_bn/beta:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') )
rename_keys.append(
(F'''block{b}_expand_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') )
rename_keys.append(
(F'''block{b}_expand_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') )
rename_keys.append(
(F'''block{b}_dwconv/depthwise_kernel:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') )
rename_keys.append((F'''block{b}_bn/gamma:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') )
rename_keys.append((F'''block{b}_bn/beta:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') )
rename_keys.append(
(F'''block{b}_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') )
rename_keys.append(
(F'''block{b}_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') )
rename_keys.append((F'''block{b}_se_reduce/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') )
rename_keys.append((F'''block{b}_se_reduce/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') )
rename_keys.append((F'''block{b}_se_expand/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') )
rename_keys.append((F'''block{b}_se_expand/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') )
rename_keys.append(
(F'''block{b}_project_conv/kernel:0''', F'''encoder.blocks.{hf_b}.projection.project_conv.weight''') )
rename_keys.append((F'''block{b}_project_bn/gamma:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.weight''') )
rename_keys.append((F'''block{b}_project_bn/beta:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.bias''') )
rename_keys.append(
(F'''block{b}_project_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') )
rename_keys.append(
(F'''block{b}_project_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') )
rename_keys.append(('top_conv/kernel:0', 'encoder.top_conv.weight') )
rename_keys.append(('top_bn/gamma:0', 'encoder.top_bn.weight') )
rename_keys.append(('top_bn/beta:0', 'encoder.top_bn.bias') )
rename_keys.append(('top_bn/moving_mean:0', 'encoder.top_bn.running_mean') )
rename_keys.append(('top_bn/moving_variance:0', 'encoder.top_bn.running_var') )
lowercase_ : Dict = {}
for item in rename_keys:
if item[0] in original_param_names:
lowercase_ : Union[str, Any] = 'efficientnet.' + item[1]
lowercase_ : Any = 'classifier.weight'
lowercase_ : Tuple = 'classifier.bias'
return key_mapping
def lowercase__( __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Tuple ):
for key, value in tf_params.items():
if "normalization" in key:
continue
lowercase_ : Any = key_mapping[key]
if "_conv" in key and "kernel" in key:
lowercase_ : int = torch.from_numpy(__SCREAMING_SNAKE_CASE ).permute(3 , 2 , 0 , 1 )
elif "depthwise_kernel" in key:
lowercase_ : str = torch.from_numpy(__SCREAMING_SNAKE_CASE ).permute(2 , 3 , 0 , 1 )
elif "kernel" in key:
lowercase_ : List[str] = torch.from_numpy(np.transpose(__SCREAMING_SNAKE_CASE ) )
else:
lowercase_ : Union[str, Any] = torch.from_numpy(__SCREAMING_SNAKE_CASE )
# Replace HF parameters with original TF model parameters
assert hf_params[hf_key].shape == new_hf_value.shape
hf_params[hf_key].copy_(__SCREAMING_SNAKE_CASE )
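# A minimal sketch of the layout conversions applied above (shapes are
# assumptions): TF stores conv kernels as (H, W, in, out) while PyTorch expects
# (out, in, H, W), hence permute(3, 2, 0, 1); depthwise kernels are
# (H, W, ch, multiplier) in TF and are mapped to (ch, multiplier, H, W)
# via permute(2, 3, 0, 1).
import numpy as np
import torch

tf_kernel = np.zeros((3, 3, 16, 32), dtype=np.float32)       # hypothetical HWIO kernel
pt_kernel = torch.from_numpy(tf_kernel).permute(3, 2, 0, 1)  # -> OIHW
assert tuple(pt_kernel.shape) == (32, 16, 3, 3)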
@torch.no_grad()
def lowercase__( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Tuple ):
lowercase_ : Any = model_classes[model_name](
include_top=__SCREAMING_SNAKE_CASE , weights='imagenet' , input_tensor=__SCREAMING_SNAKE_CASE , input_shape=__SCREAMING_SNAKE_CASE , pooling=__SCREAMING_SNAKE_CASE , classes=10_00 , classifier_activation='softmax' , )
lowercase_ : Tuple = original_model.trainable_variables
lowercase_ : str = original_model.non_trainable_variables
lowercase_ : Tuple = {param.name: param.numpy() for param in tf_params}
for param in tf_non_train_params:
lowercase_ : Dict = param.numpy()
lowercase_ : str = list(tf_params.keys() )
# Load HuggingFace model
lowercase_ : List[str] = get_efficientnet_config(__SCREAMING_SNAKE_CASE )
lowercase_ : Tuple = EfficientNetForImageClassification(__SCREAMING_SNAKE_CASE ).eval()
lowercase_ : List[str] = hf_model.state_dict()
# Create src-to-dst parameter name mapping dictionary
print('Converting parameters...' )
lowercase_ : List[str] = rename_keys(__SCREAMING_SNAKE_CASE )
replace_params(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Initialize preprocessor and preprocess input image
lowercase_ : List[Any] = convert_image_processor(__SCREAMING_SNAKE_CASE )
lowercase_ : List[Any] = preprocessor(images=prepare_img() , return_tensors='pt' )
# HF model inference
hf_model.eval()
with torch.no_grad():
lowercase_ : Dict = hf_model(**__SCREAMING_SNAKE_CASE )
lowercase_ : Any = outputs.logits.detach().numpy()
# Original model inference
lowercase_ : str = False
lowercase_ : List[Any] = CONFIG_MAP[model_name]['image_size']
lowercase_ : List[str] = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
lowercase_ : int = image.img_to_array(__SCREAMING_SNAKE_CASE )
lowercase_ : Tuple = np.expand_dims(__SCREAMING_SNAKE_CASE , axis=0 )
lowercase_ : Dict = original_model.predict(__SCREAMING_SNAKE_CASE )
# Check whether original and HF model outputs match -> np.allclose
assert np.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1E-3 ), "The predicted logits are not the same."
print('Model outputs match!' )
if save_model:
# Create folder to save model
if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
os.mkdir(__SCREAMING_SNAKE_CASE )
# Save converted model and image processor
hf_model.save_pretrained(__SCREAMING_SNAKE_CASE )
preprocessor.save_pretrained(__SCREAMING_SNAKE_CASE )
if push_to_hub:
# Push model and image processor to hub
print(F'''Pushing converted {model_name} to the hub...''' )
lowercase_ : List[str] = F'''efficientnet-{model_name}'''
preprocessor.push_to_hub(__SCREAMING_SNAKE_CASE )
hf_model.push_to_hub(__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="b0",
type=str,
help="Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="hf_model",
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--save_model", action="store_true", help="Save model to local")
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
__SCREAMING_SNAKE_CASE =parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
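# Example invocation of the script above (file name hypothetical; flags as
# registered with argparse):
#   python convert_efficientnet_to_pytorch.py --model_name b0 --save_model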
| 321
|
"""simple docstring"""
__SCREAMING_SNAKE_CASE ={
"a": "AAAAA",
"b": "AAAAB",
"c": "AAABA",
"d": "AAABB",
"e": "AABAA",
"f": "AABAB",
"g": "AABBA",
"h": "AABBB",
"i": "ABAAA",
"j": "BBBAA",
"k": "ABAAB",
"l": "ABABA",
"m": "ABABB",
"n": "ABBAA",
"o": "ABBAB",
"p": "ABBBA",
"q": "ABBBB",
"r": "BAAAA",
"s": "BAAAB",
"t": "BAABA",
"u": "BAABB",
"v": "BBBAB",
"w": "BABAA",
"x": "BABAB",
"y": "BABBA",
"z": "BABBB",
" ": " ",
}
__SCREAMING_SNAKE_CASE ={value: key for key, value in encode_dict.items()}
def lowercase__( __SCREAMING_SNAKE_CASE : str ):
lowercase_ : Union[str, Any] = ''
for letter in word.lower():
if letter.isalpha() or letter == " ":
encoded += encode_dict[letter]
else:
raise Exception('encode() accepts only letters of the alphabet and spaces' )
return encoded
def lowercase__( __SCREAMING_SNAKE_CASE : str ):
if set(__SCREAMING_SNAKE_CASE ) - {"A", "B", " "} != set():
raise Exception('decode() accepts only \'A\', \'B\' and spaces' )
lowercase_ : Dict = ''
for word in coded.split():
while len(__SCREAMING_SNAKE_CASE ) != 0:
decoded += decode_dict[word[:5]]
lowercase_ : Any = word[5:]
decoded += " "
return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
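# A quick round-trip sketch for the Baconian cipher above. Both helpers are
# renamed lowercase__ in this file, so the original encode/decode names are
# assumed here:
# >>> encode("ab")        # 'AAAAA' + 'AAAAB'
# 'AAAAAAAAAB'
# >>> decode("AAAAA AAAAB")
# 'a b'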
| 321
| 1
|
"""simple docstring"""
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
__SCREAMING_SNAKE_CASE =logging.getLogger(__name__)
class UpperCamelCase :
def __init__( self ) -> Tuple:
'''simple docstring'''
lowercase_ : Dict = False
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Any:
'''simple docstring'''
if not self.initialized:
lowercase_ : Optional[Any] = RagRetriever(
__UpperCamelCase ,question_encoder_tokenizer=__UpperCamelCase ,generator_tokenizer=__UpperCamelCase ,index=__UpperCamelCase ,init_retrieval=__UpperCamelCase ,)
lowercase_ : Dict = True
def _UpperCAmelCase ( self ) -> int:
'''simple docstring'''
self.retriever.index.init_index()
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ , lowercase_ : Any = self.retriever._main_retrieve(__UpperCamelCase ,__UpperCamelCase )
return doc_ids, retrieved_doc_embeds
class UpperCamelCase ( lowercase_ ):
def __init__( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=None ) -> Optional[int]:
'''simple docstring'''
if index is not None and index.is_initialized() and len(__UpperCamelCase ) > 0:
raise ValueError(
'When using Ray for distributed fine-tuning, '
'you\'ll need to provide the paths instead, '
'as the dataset and the index are loaded '
'separately. More info in examples/rag/use_own_knowledge_dataset.py ' )
super().__init__(
__UpperCamelCase ,question_encoder_tokenizer=__UpperCamelCase ,generator_tokenizer=__UpperCamelCase ,index=__UpperCamelCase ,init_retrieval=__UpperCamelCase ,)
lowercase_ : List[Any] = retrieval_workers
if len(self.retrieval_workers ) > 0:
ray.get(
[
worker.create_rag_retriever.remote(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
for worker in self.retrieval_workers
] )
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
logger.info('initializing retrieval' )
if len(self.retrieval_workers ) > 0:
ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] )
else:
# Non-distributed training. Load index into this same process.
self.index.init_index()
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> Tuple:
'''simple docstring'''
if len(self.retrieval_workers ) > 0:
# Select a random retrieval actor.
lowercase_ : Any = self.retrieval_workers[random.randint(0 ,len(self.retrieval_workers ) - 1 )]
lowercase_ , lowercase_ : Optional[int] = ray.get(random_worker.retrieve.remote(__UpperCamelCase ,__UpperCamelCase ) )
else:
lowercase_ , lowercase_ : int = self._main_retrieve(__UpperCamelCase ,__UpperCamelCase )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(__UpperCamelCase )
@classmethod
def _UpperCAmelCase ( cls ,__UpperCamelCase ,__UpperCamelCase=None ,**__UpperCamelCase ) -> Optional[Any]:
'''simple docstring'''
return super(__UpperCamelCase ,cls ).get_tokenizers(__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase )
@classmethod
def _UpperCAmelCase ( cls ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=None ,**__UpperCamelCase ) -> List[Any]:
'''simple docstring'''
lowercase_ : Tuple = kwargs.pop('config' ,__UpperCamelCase ) or RagConfig.from_pretrained(__UpperCamelCase ,**__UpperCamelCase )
lowercase_ : List[Any] = RagTokenizer.from_pretrained(__UpperCamelCase ,config=__UpperCamelCase )
lowercase_ : str = rag_tokenizer.question_encoder
lowercase_ : Dict = rag_tokenizer.generator
if indexed_dataset is not None:
lowercase_ : Union[str, Any] = 'custom'
lowercase_ : List[Any] = CustomHFIndex(config.retrieval_vector_size ,__UpperCamelCase )
else:
lowercase_ : Union[str, Any] = cls._build_index(__UpperCamelCase )
return cls(
__UpperCamelCase ,question_encoder_tokenizer=__UpperCamelCase ,generator_tokenizer=__UpperCamelCase ,retrieval_workers=__UpperCamelCase ,index=__UpperCamelCase ,)
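# Hedged sketch of the actor fan-out used above: several stateless Ray workers
# each hold a copy of the index, and retrieve() dispatches to one picked at
# random. The Worker class and its payload below are hypothetical stand-ins
# for the first (obfuscated) class in this file.
import random

import ray


@ray.remote
class Worker:
    def retrieve(self, question_ids, n_docs):
        return question_ids, n_docs  # placeholder for (doc_ids, doc_embeds)


ray.init(ignore_reinit_error=True)
workers = [Worker.remote() for _ in range(4)]
picked = workers[random.randint(0, len(workers) - 1)]
doc_ids, doc_embeds = ray.get(picked.retrieve.remote([0, 1], 5))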
| 321
|
"""simple docstring"""
def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int ):
def count_of_possible_combinations(__SCREAMING_SNAKE_CASE : int ) -> int:
if target < 0:
return 0
if target == 0:
return 1
return sum(count_of_possible_combinations(target - item ) for item in array )
return count_of_possible_combinations(__SCREAMING_SNAKE_CASE )
def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int ):
def count_of_possible_combinations_with_dp_array(
__SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] ) -> int:
if target < 0:
return 0
if target == 0:
return 1
if dp_array[target] != -1:
return dp_array[target]
lowercase_ : str = sum(
count_of_possible_combinations_with_dp_array(target - item , __SCREAMING_SNAKE_CASE )
for item in array )
lowercase_ : Tuple = answer
return answer
lowercase_ : Optional[Any] = [-1] * (target + 1)
return count_of_possible_combinations_with_dp_array(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int ):
lowercase_ : Dict = [0] * (target + 1)
lowercase_ : Dict = 1
for i in range(1 , target + 1 ):
for j in range(__SCREAMING_SNAKE_CASE ):
if i - array[j] >= 0:
dp_array[i] += dp_array[i - array[j]]
return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
__SCREAMING_SNAKE_CASE =3
__SCREAMING_SNAKE_CASE =5
__SCREAMING_SNAKE_CASE =[1, 2, 5]
print(combination_sum_iv(n, array, target))
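# With n=3, array=[1, 2, 5] and target=5 this prints 9: the ordered
# compositions of 5 are 1+1+1+1+1; 1+1+1+2 in four orderings; 1+2+2 in three
# orderings; and 5 itself.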
| 321
| 1
|
"""simple docstring"""
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
def lowercase__( __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : int ):
try:
with open(__SCREAMING_SNAKE_CASE , 'rb' ) as flax_state_f:
lowercase_ : List[Any] = from_bytes(__SCREAMING_SNAKE_CASE , flax_state_f.read() )
except UnpicklingError as e:
try:
with open(__SCREAMING_SNAKE_CASE ) as f:
if f.read().startswith('version' ):
raise OSError(
'You seem to have cloned a repository without having git-lfs installed. Please'
' install git-lfs and run `git lfs install` followed by `git lfs pull` in the'
' folder you cloned.' )
else:
raise ValueError from e
except (UnicodeDecodeError, ValueError):
raise EnvironmentError(F'''Unable to convert {model_file} to Flax deserializable object. ''' )
return load_flax_weights_in_pytorch_model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def lowercase__( __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Any ):
try:
import torch # noqa: F401
except ImportError:
logger.error(
'Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see'
' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'
' instructions.' )
raise
# check if we have bf16 weights
lowercase_ : List[Any] = flatten_dict(jax.tree_util.tree_map(lambda __SCREAMING_SNAKE_CASE : x.dtype == jnp.bfloataa , __SCREAMING_SNAKE_CASE ) ).values()
if any(__SCREAMING_SNAKE_CASE ):
# convert all weights to fp32 if they are bf16 since torch.from_numpy cannot handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
'Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '
'before loading those in PyTorch model.' )
lowercase_ : Optional[Any] = jax.tree_util.tree_map(
lambda __SCREAMING_SNAKE_CASE : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , __SCREAMING_SNAKE_CASE )
lowercase_ : str = ''
lowercase_ : List[str] = flatten_dict(__SCREAMING_SNAKE_CASE , sep='.' )
lowercase_ : List[str] = pt_model.state_dict()
# keep track of unexpected & missing keys
lowercase_ : Dict = []
lowercase_ : List[str] = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
lowercase_ : str = flax_key_tuple.split('.' )
if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
lowercase_ : Any = flax_key_tuple_array[:-1] + ['weight']
lowercase_ : Any = jnp.transpose(__SCREAMING_SNAKE_CASE , (3, 2, 0, 1) )
elif flax_key_tuple_array[-1] == "kernel":
lowercase_ : Dict = flax_key_tuple_array[:-1] + ['weight']
lowercase_ : Any = flax_tensor.T
elif flax_key_tuple_array[-1] == "scale":
lowercase_ : int = flax_key_tuple_array[:-1] + ['weight']
if "time_embedding" not in flax_key_tuple_array:
for i, flax_key_tuple_string in enumerate(__SCREAMING_SNAKE_CASE ):
lowercase_ : List[Any] = (
flax_key_tuple_string.replace('_0' , '.0' )
.replace('_1' , '.1' )
.replace('_2' , '.2' )
.replace('_3' , '.3' )
.replace('_4' , '.4' )
.replace('_5' , '.5' )
.replace('_6' , '.6' )
.replace('_7' , '.7' )
.replace('_8' , '.8' )
.replace('_9' , '.9' )
)
lowercase_ : Dict = '.'.join(__SCREAMING_SNAKE_CASE )
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F'''Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '''
F'''to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
else:
# add weight to pytorch dict
lowercase_ : str = np.asarray(__SCREAMING_SNAKE_CASE ) if not isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ) else flax_tensor
lowercase_ : Union[str, Any] = torch.from_numpy(__SCREAMING_SNAKE_CASE )
# remove from missing keys
missing_keys.remove(__SCREAMING_SNAKE_CASE )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(__SCREAMING_SNAKE_CASE )
pt_model.load_state_dict(__SCREAMING_SNAKE_CASE )
# re-transform missing_keys to list
lowercase_ : Tuple = list(__SCREAMING_SNAKE_CASE )
if len(__SCREAMING_SNAKE_CASE ) > 0:
logger.warning(
'Some weights of the Flax model were not used when initializing the PyTorch model'
F''' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'''
F''' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'''
' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'
F''' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'''
' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'
' FlaxBertForSequenceClassification model).' )
if len(__SCREAMING_SNAKE_CASE ) > 0:
logger.warning(
F'''Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'''
F''' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'''
' use it for predictions and inference.' )
return pt_model
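# A minimal sketch of the two kernel-layout rules applied above (shapes are
# assumptions): Flax dense kernels are (in, out) and become PyTorch (out, in)
# via a transpose; 4-D conv kernels are (H, W, in, out) and become
# (out, in, H, W) via transpose(3, 2, 0, 1).
import jax.numpy as jnp

dense = jnp.zeros((128, 256))
assert dense.T.shape == (256, 128)

conv = jnp.zeros((3, 3, 16, 32))
assert jnp.transpose(conv, (3, 2, 0, 1)).shape == (32, 16, 3, 3)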
| 321
|
"""simple docstring"""
class UpperCamelCase :
def __init__( self ,__UpperCamelCase ) -> None:
'''simple docstring'''
lowercase_ : int = set_counts
lowercase_ : List[Any] = max(__UpperCamelCase )
lowercase_ : Union[str, Any] = len(__UpperCamelCase )
lowercase_ : Dict = [1] * num_sets
lowercase_ : Optional[int] = list(range(__UpperCamelCase ) )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> bool:
'''simple docstring'''
lowercase_ : Optional[int] = self.get_parent(__UpperCamelCase )
lowercase_ : int = self.get_parent(__UpperCamelCase )
if src_parent == dst_parent:
return False
if self.ranks[dst_parent] >= self.ranks[src_parent]:
self.set_counts[dst_parent] += self.set_counts[src_parent]
lowercase_ : Tuple = 0
lowercase_ : str = dst_parent
if self.ranks[dst_parent] == self.ranks[src_parent]:
self.ranks[dst_parent] += 1
lowercase_ : Union[str, Any] = self.set_counts[dst_parent]
else:
self.set_counts[src_parent] += self.set_counts[dst_parent]
lowercase_ : str = 0
lowercase_ : Tuple = src_parent
lowercase_ : int = self.set_counts[src_parent]
lowercase_ : str = max(self.max_set ,__UpperCamelCase )
return True
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> int:
'''simple docstring'''
if self.parents[disj_set] == disj_set:
return disj_set
lowercase_ : Union[str, Any] = self.get_parent(self.parents[disj_set] )
return self.parents[disj_set]
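# A self-contained sketch of the path compression performed by the find method
# above (illustrative names, not the obfuscated ones in this file):
def find(parents: list, x: int) -> int:
    if parents[x] != x:
        parents[x] = find(parents, parents[x])  # point x straight at the root
    return parents[x]

parents = [0, 0, 1, 2]           # chain 3 -> 2 -> 1 -> 0
assert find(parents, 3) == 0
assert parents == [0, 0, 0, 0]   # the whole chain now hangs off the root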
| 321
| 1
|
"""simple docstring"""
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE ={
"nielsr/canine-s": 2048,
}
# Unicode defines 1,114,112 total “codepoints”
__SCREAMING_SNAKE_CASE =111_4112
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
__SCREAMING_SNAKE_CASE =0
__SCREAMING_SNAKE_CASE =0XE0_00
__SCREAMING_SNAKE_CASE =0XE0_01
__SCREAMING_SNAKE_CASE =0XE0_02
__SCREAMING_SNAKE_CASE =0XE0_03
__SCREAMING_SNAKE_CASE =0XE0_04
# Maps special codepoints to human-readable names.
__SCREAMING_SNAKE_CASE ={
# Special symbols are represented using codepoints values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: "[CLS]",
SEP: "[SEP]",
BOS: "[BOS]",
MASK: "[MASK]",
PAD: "[PAD]",
RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
__SCREAMING_SNAKE_CASE ={name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class UpperCamelCase ( lowercase_ ):
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self ,__UpperCamelCase=chr(__UpperCamelCase ) ,__UpperCamelCase=chr(__UpperCamelCase ) ,__UpperCamelCase=chr(__UpperCamelCase ) ,__UpperCamelCase=chr(__UpperCamelCase ) ,__UpperCamelCase=chr(__UpperCamelCase ) ,__UpperCamelCase=chr(__UpperCamelCase ) ,__UpperCamelCase=False ,__UpperCamelCase=2048 ,**__UpperCamelCase ,) -> Optional[Any]:
'''simple docstring'''
lowercase_ : Any = AddedToken(__UpperCamelCase ,lstrip=__UpperCamelCase ,rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase ,__UpperCamelCase ) else bos_token
lowercase_ : Optional[Any] = AddedToken(__UpperCamelCase ,lstrip=__UpperCamelCase ,rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase ,__UpperCamelCase ) else eos_token
lowercase_ : List[str] = AddedToken(__UpperCamelCase ,lstrip=__UpperCamelCase ,rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase ,__UpperCamelCase ) else sep_token
lowercase_ : str = AddedToken(__UpperCamelCase ,lstrip=__UpperCamelCase ,rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase ,__UpperCamelCase ) else cls_token
lowercase_ : int = AddedToken(__UpperCamelCase ,lstrip=__UpperCamelCase ,rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase ,__UpperCamelCase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
lowercase_ : str = AddedToken(__UpperCamelCase ,lstrip=__UpperCamelCase ,rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase ,__UpperCamelCase ) else mask_token
super().__init__(
bos_token=__UpperCamelCase ,eos_token=__UpperCamelCase ,sep_token=__UpperCamelCase ,cls_token=__UpperCamelCase ,pad_token=__UpperCamelCase ,mask_token=__UpperCamelCase ,add_prefix_space=__UpperCamelCase ,model_max_length=__UpperCamelCase ,**__UpperCamelCase ,)
# Creates a mapping for looking up the IDs of special symbols.
lowercase_ : Dict[str, int] = {}
for codepoint, name in SPECIAL_CODEPOINTS.items():
lowercase_ : Tuple = codepoint
# Creates a mapping for looking up the string forms of special symbol IDs.
lowercase_ : Dict[int, str] = {
codepoint: name for name, codepoint in self._special_codepoints.items()
}
lowercase_ : Any = UNICODE_VOCAB_SIZE
lowercase_ : Optional[Any] = len(self._special_codepoints )
@property
def _UpperCAmelCase ( self ) -> int:
'''simple docstring'''
return self._unicode_vocab_size
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]:
'''simple docstring'''
return list(__UpperCamelCase )
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> int:
'''simple docstring'''
try:
return ord(__UpperCamelCase )
except TypeError:
raise ValueError(f'''invalid token: \'{token}\'''' )
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> str:
'''simple docstring'''
try:
if index in SPECIAL_CODEPOINTS:
return SPECIAL_CODEPOINTS[index]
return chr(__UpperCamelCase )
except TypeError:
raise ValueError(f'''invalid id: {index}''' )
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> str:
'''simple docstring'''
return "".join(__UpperCamelCase )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> List[int]:
'''simple docstring'''
lowercase_ : str = [self.sep_token_id]
lowercase_ : Tuple = [self.cls_token_id]
lowercase_ : str = cls + token_ids_a + sep
if token_ids_a is not None:
result += token_ids_a + sep
return result
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ,__UpperCamelCase = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCamelCase ,token_ids_a=__UpperCamelCase ,already_has_special_tokens=__UpperCamelCase )
lowercase_ : int = [1] + ([0] * len(__UpperCamelCase )) + [1]
if token_ids_a is not None:
result += ([0] * len(__UpperCamelCase )) + [1]
return result
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> List[int]:
'''simple docstring'''
lowercase_ : int = [self.sep_token_id]
lowercase_ : List[str] = [self.cls_token_id]
lowercase_ : List[Any] = len(cls + token_ids_a + sep ) * [0]
if token_ids_a is not None:
result += len(token_ids_a + sep ) * [1]
return result
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> str:
'''simple docstring'''
return ()
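# CANINE has no learned vocabulary: a token is a single Unicode character and
# its id is simply ord(char), with a handful of private-use codepoints reserved
# for the special tokens above. A minimal sketch of that mapping:
text = "hi!"
ids = [ord(c) for c in text]                  # [104, 105, 33]
assert "".join(chr(i) for i in ids) == text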
| 321
|
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE ={
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
__SCREAMING_SNAKE_CASE ={
"vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
"merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
"tokenizer_config_file": {
"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
},
}
__SCREAMING_SNAKE_CASE ={"facebook/blenderbot-3B": 128}
class UpperCamelCase ( lowercase_ ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = ['input_ids', 'attention_mask']
lowercase = BlenderbotTokenizer
def __init__( self ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase="replace" ,__UpperCamelCase="<s>" ,__UpperCamelCase="</s>" ,__UpperCamelCase="</s>" ,__UpperCamelCase="<s>" ,__UpperCamelCase="<unk>" ,__UpperCamelCase="<pad>" ,__UpperCamelCase="<mask>" ,__UpperCamelCase=False ,__UpperCamelCase=True ,**__UpperCamelCase ,) -> Optional[int]:
'''simple docstring'''
super().__init__(
__UpperCamelCase ,__UpperCamelCase ,tokenizer_file=__UpperCamelCase ,errors=__UpperCamelCase ,bos_token=__UpperCamelCase ,eos_token=__UpperCamelCase ,sep_token=__UpperCamelCase ,cls_token=__UpperCamelCase ,unk_token=__UpperCamelCase ,pad_token=__UpperCamelCase ,mask_token=__UpperCamelCase ,add_prefix_space=__UpperCamelCase ,trim_offsets=__UpperCamelCase ,**__UpperCamelCase ,)
lowercase_ : Optional[int] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' ,__UpperCamelCase ) != add_prefix_space:
lowercase_ : Union[str, Any] = getattr(__UpperCamelCase ,pre_tok_state.pop('type' ) )
lowercase_ : Any = add_prefix_space
lowercase_ : Tuple = pre_tok_class(**__UpperCamelCase )
lowercase_ : int = add_prefix_space
lowercase_ : Any = 'post_processor'
lowercase_ : Optional[Any] = getattr(self.backend_tokenizer ,__UpperCamelCase ,__UpperCamelCase )
if tokenizer_component_instance:
lowercase_ : Tuple = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
lowercase_ : str = tuple(state['sep'] )
if "cls" in state:
lowercase_ : Union[str, Any] = tuple(state['cls'] )
lowercase_ : str = False
if state.get('add_prefix_space' ,__UpperCamelCase ) != add_prefix_space:
lowercase_ : Dict = add_prefix_space
lowercase_ : int = True
if state.get('trim_offsets' ,__UpperCamelCase ) != trim_offsets:
lowercase_ : Optional[Any] = trim_offsets
lowercase_ : Tuple = True
if changes_to_apply:
lowercase_ : Union[str, Any] = getattr(__UpperCamelCase ,state.pop('type' ) )
lowercase_ : Union[str, Any] = component_class(**__UpperCamelCase )
setattr(self.backend_tokenizer ,__UpperCamelCase ,__UpperCamelCase )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Tuple:
'''simple docstring'''
lowercase_ : Any = AddedToken(__UpperCamelCase ,lstrip=__UpperCamelCase ,rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase ,__UpperCamelCase ) else value
lowercase_ : str = value
def _UpperCAmelCase ( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> BatchEncoding:
'''simple docstring'''
lowercase_ : Optional[int] = kwargs.get('is_split_into_words' ,__UpperCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__UpperCamelCase ,**__UpperCamelCase )
def _UpperCAmelCase ( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> BatchEncoding:
'''simple docstring'''
lowercase_ : List[str] = kwargs.get('is_split_into_words' ,__UpperCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__UpperCamelCase ,**__UpperCamelCase )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> Tuple[str]:
'''simple docstring'''
lowercase_ : Any = self._tokenizer.model.save(__UpperCamelCase ,name=__UpperCamelCase )
return tuple(__UpperCamelCase )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> List[int]:
'''simple docstring'''
lowercase_ : int = [self.sep_token_id]
lowercase_ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> Any:
'''simple docstring'''
return token_ids_a + [self.eos_token_id]
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[int]:
'''simple docstring'''
lowercase_ : Optional[Any] = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(' ' + text )
else:
# Generated responses should contain them already.
inputs.append(__UpperCamelCase )
lowercase_ : Dict = ' '.join(__UpperCamelCase )
lowercase_ : str = self.encode(__UpperCamelCase )
if len(__UpperCamelCase ) > self.model_max_length:
lowercase_ : List[str] = input_ids[-self.model_max_length :]
logger.warning(f'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
return input_ids
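# The conversation builder above keeps only the most recent tokens once the
# history exceeds model_max_length; a standalone sketch of that left
# truncation (the 128 limit matches the mapping defined near the top):
model_max_length = 128
input_ids = list(range(200))                  # hypothetical over-long encoding
input_ids = input_ids[-model_max_length:]
assert len(input_ids) == 128 and input_ids[0] == 72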
| 321
| 1
|
"""simple docstring"""
import collections
import importlib.util
import os
import re
from pathlib import Path
__SCREAMING_SNAKE_CASE ="src/transformers"
# Matches is_xxx_available()
__SCREAMING_SNAKE_CASE =re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
__SCREAMING_SNAKE_CASE =re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
__SCREAMING_SNAKE_CASE =re.compile(r"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
__SCREAMING_SNAKE_CASE =re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
__SCREAMING_SNAKE_CASE =re.compile(r"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
__SCREAMING_SNAKE_CASE =re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
__SCREAMING_SNAKE_CASE =re.compile("^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
__SCREAMING_SNAKE_CASE =re.compile("^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
__SCREAMING_SNAKE_CASE =re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
__SCREAMING_SNAKE_CASE =re.compile(r"^\s*try:")
# Catches a line with else:
__SCREAMING_SNAKE_CASE =re.compile(r"^\s*else:")
def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] ):
if _re_test_backend.search(__SCREAMING_SNAKE_CASE ) is None:
return None
lowercase_ : Tuple = [b[0] for b in _re_backend.findall(__SCREAMING_SNAKE_CASE )]
backends.sort()
return "_and_".join(__SCREAMING_SNAKE_CASE )
def lowercase__( __SCREAMING_SNAKE_CASE : Union[str, Any] ):
with open(__SCREAMING_SNAKE_CASE , 'r' , encoding='utf-8' , newline='\n' ) as f:
lowercase_ : str = f.readlines()
lowercase_ : Any = 0
while line_index < len(__SCREAMING_SNAKE_CASE ) and not lines[line_index].startswith('_import_structure = {' ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(__SCREAMING_SNAKE_CASE ):
return None
# First grab the objects without a specific backend in _import_structure
lowercase_ : Optional[Any] = []
while not lines[line_index].startswith('if TYPE_CHECKING' ) and find_backend(lines[line_index] ) is None:
lowercase_ : Union[str, Any] = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(__SCREAMING_SNAKE_CASE ):
lowercase_ : Optional[int] = _re_one_line_import_struct.search(__SCREAMING_SNAKE_CASE ).groups()[0]
lowercase_ : List[Any] = re.findall(r'\[([^\]]+)\]' , __SCREAMING_SNAKE_CASE )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(', ' )] )
line_index += 1
continue
lowercase_ : Optional[Any] = _re_import_struct_key_value.search(__SCREAMING_SNAKE_CASE )
if single_line_import_search is not None:
lowercase_ : Tuple = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(', ' ) if len(__SCREAMING_SNAKE_CASE ) > 0]
objects.extend(__SCREAMING_SNAKE_CASE )
elif line.startswith(' ' * 8 + '"' ):
objects.append(line[9:-3] )
line_index += 1
lowercase_ : int = {'none': objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith('if TYPE_CHECKING' ):
# If the line is an if not is_backend_available, we grab all objects associated.
lowercase_ : Dict = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
lowercase_ : Tuple = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
lowercase_ : Any = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 4 ):
lowercase_ : str = lines[line_index]
if _re_import_struct_add_one.search(__SCREAMING_SNAKE_CASE ) is not None:
objects.append(_re_import_struct_add_one.search(__SCREAMING_SNAKE_CASE ).groups()[0] )
elif _re_import_struct_add_many.search(__SCREAMING_SNAKE_CASE ) is not None:
lowercase_ : Dict = _re_import_struct_add_many.search(__SCREAMING_SNAKE_CASE ).groups()[0].split(', ' )
lowercase_ : Tuple = [obj[1:-1] for obj in imports if len(__SCREAMING_SNAKE_CASE ) > 0]
objects.extend(__SCREAMING_SNAKE_CASE )
elif _re_between_brackets.search(__SCREAMING_SNAKE_CASE ) is not None:
lowercase_ : Dict = _re_between_brackets.search(__SCREAMING_SNAKE_CASE ).groups()[0].split(', ' )
lowercase_ : Optional[int] = [obj[1:-1] for obj in imports if len(__SCREAMING_SNAKE_CASE ) > 0]
objects.extend(__SCREAMING_SNAKE_CASE )
elif _re_quote_object.search(__SCREAMING_SNAKE_CASE ) is not None:
objects.append(_re_quote_object.search(__SCREAMING_SNAKE_CASE ).groups()[0] )
elif line.startswith(' ' * 8 + '"' ):
objects.append(line[9:-3] )
elif line.startswith(' ' * 12 + '"' ):
objects.append(line[13:-3] )
line_index += 1
lowercase_ : List[Any] = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
lowercase_ : int = []
while (
line_index < len(__SCREAMING_SNAKE_CASE )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith('else' )
):
lowercase_ : List[Any] = lines[line_index]
lowercase_ : List[str] = _re_import.search(__SCREAMING_SNAKE_CASE )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 8 ):
objects.append(line[8:-2] )
line_index += 1
lowercase_ : Dict = {'none': objects}
# Let's continue with backend-specific objects
while line_index < len(__SCREAMING_SNAKE_CASE ):
# If the line is an if is_backend_available, we grab all objects associated.
lowercase_ : Optional[Any] = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
lowercase_ : Dict = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
lowercase_ : int = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 8 ):
lowercase_ : str = lines[line_index]
lowercase_ : Dict = _re_import.search(__SCREAMING_SNAKE_CASE )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 12 ):
objects.append(line[12:-2] )
line_index += 1
lowercase_ : List[Any] = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any ):
def find_duplicates(__SCREAMING_SNAKE_CASE : Union[str, Any] ):
return [k for k, v in collections.Counter(__SCREAMING_SNAKE_CASE ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
lowercase_ : int = []
for key in import_dict_objects.keys():
lowercase_ : int = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F'''Duplicate _import_structure definitions for: {duplicate_imports}''' )
lowercase_ : List[str] = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
lowercase_ : Any = 'base imports' if key == 'none' else F'''{key} backend'''
errors.append(F'''Differences for {name}:''' )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F''' {a} in TYPE_HINT but not in _import_structure.''' )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F''' {a} in _import_structure but not in TYPE_HINT.''' )
return errors
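# The duplicate check above relies on collections.Counter; a tiny sketch:
import collections

items = ["BertModel", "BertModel", "BertConfig"]
dups = [k for k, v in collections.Counter(items).items() if v > 1]
assert dups == ["BertModel"]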
def lowercase__( ):
lowercase_ : Union[str, Any] = []
for root, _, files in os.walk(__SCREAMING_SNAKE_CASE ):
if "__init__.py" in files:
lowercase_ : Union[str, Any] = os.path.join(__SCREAMING_SNAKE_CASE , '__init__.py' )
lowercase_ : Optional[int] = parse_init(__SCREAMING_SNAKE_CASE )
if objects is not None:
lowercase_ : Optional[Any] = analyze_results(*__SCREAMING_SNAKE_CASE )
if len(__SCREAMING_SNAKE_CASE ) > 0:
lowercase_ : Dict = F'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'''
failures.append('\n'.join(__SCREAMING_SNAKE_CASE ) )
if len(__SCREAMING_SNAKE_CASE ) > 0:
raise ValueError('\n\n'.join(__SCREAMING_SNAKE_CASE ) )
def lowercase__( ):
lowercase_ : Any = []
for path, directories, files in os.walk(__SCREAMING_SNAKE_CASE ):
for folder in directories:
# Ignore private modules
if folder.startswith('_' ):
directories.remove(__SCREAMING_SNAKE_CASE )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(__SCREAMING_SNAKE_CASE ) / folder).glob('*.py' ) ) ) == 0:
continue
lowercase_ : Dict = str((Path(__SCREAMING_SNAKE_CASE ) / folder).relative_to(__SCREAMING_SNAKE_CASE ) )
lowercase_ : Optional[int] = short_path.replace(os.path.sep , '.' )
submodules.append(__SCREAMING_SNAKE_CASE )
for fname in files:
if fname == "__init__.py":
continue
lowercase_ : Dict = str((Path(__SCREAMING_SNAKE_CASE ) / fname).relative_to(__SCREAMING_SNAKE_CASE ) )
lowercase_ : Tuple = short_path.replace('.py' , '' ).replace(os.path.sep , '.' )
if len(submodule.split('.' ) ) == 1:
submodules.append(__SCREAMING_SNAKE_CASE )
return submodules
__SCREAMING_SNAKE_CASE =[
"convert_pytorch_checkpoint_to_tf2",
"modeling_flax_pytorch_utils",
]
def lowercase__( ):
# This is to make sure the transformers module imported is the one in the repo.
lowercase_ : List[str] = importlib.util.spec_from_file_location(
'transformers' , os.path.join(__SCREAMING_SNAKE_CASE , '__init__.py' ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , )
lowercase_ : List[str] = spec.loader.load_module()
lowercase_ : str = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
]
if len(__SCREAMING_SNAKE_CASE ) > 0:
lowercase_ : Any = '\n'.join(F'''- {module}''' for module in module_not_registered )
raise ValueError(
'The following submodules are not properly registered in the main init of Transformers:\n'
F'''{list_of_modules}\n'''
'Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.' )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 321
|
"""simple docstring"""
import os
import sys
import unittest
__SCREAMING_SNAKE_CASE =os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
__SCREAMING_SNAKE_CASE =os.path.join("tests", "models", "bert", "test_modeling_bert.py")
__SCREAMING_SNAKE_CASE =os.path.join("tests", "models", "blip", "test_modeling_blip.py")
class UpperCamelCase ( unittest.TestCase ):
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
lowercase_ : Tuple = get_test_to_tester_mapping(__UpperCamelCase )
lowercase_ : Optional[int] = get_test_to_tester_mapping(__UpperCamelCase )
lowercase_ : List[str] = {'BertModelTest': 'BertModelTester'}
lowercase_ : Union[str, Any] = {
'BlipModelTest': 'BlipModelTester',
'BlipTextImageModelTest': 'BlipTextImageModelsModelTester',
'BlipTextModelTest': 'BlipTextModelTester',
'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester',
'BlipVQAModelTest': 'BlipVQAModelTester',
'BlipVisionModelTest': 'BlipVisionModelTester',
}
self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase )
self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase )
def _UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ : Optional[Any] = get_model_to_test_mapping(__UpperCamelCase )
lowercase_ : List[str] = get_model_to_test_mapping(__UpperCamelCase )
lowercase_ : Any = {
'BertForMaskedLM': ['BertModelTest'],
'BertForMultipleChoice': ['BertModelTest'],
'BertForNextSentencePrediction': ['BertModelTest'],
'BertForPreTraining': ['BertModelTest'],
'BertForQuestionAnswering': ['BertModelTest'],
'BertForSequenceClassification': ['BertModelTest'],
'BertForTokenClassification': ['BertModelTest'],
'BertLMHeadModel': ['BertModelTest'],
'BertModel': ['BertModelTest'],
}
lowercase_ : Any = {
'BlipForConditionalGeneration': ['BlipTextImageModelTest'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'],
'BlipForQuestionAnswering': ['BlipVQAModelTest'],
'BlipModel': ['BlipModelTest'],
'BlipTextModel': ['BlipTextModelTest'],
'BlipVisionModel': ['BlipVisionModelTest'],
}
self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase )
self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase )
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
lowercase_ : List[str] = get_model_to_tester_mapping(__UpperCamelCase )
lowercase_ : Dict = get_model_to_tester_mapping(__UpperCamelCase )
lowercase_ : Tuple = {
'BertForMaskedLM': ['BertModelTester'],
'BertForMultipleChoice': ['BertModelTester'],
'BertForNextSentencePrediction': ['BertModelTester'],
'BertForPreTraining': ['BertModelTester'],
'BertForQuestionAnswering': ['BertModelTester'],
'BertForSequenceClassification': ['BertModelTester'],
'BertForTokenClassification': ['BertModelTester'],
'BertLMHeadModel': ['BertModelTester'],
'BertModel': ['BertModelTester'],
}
lowercase_ : Optional[Any] = {
'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'],
'BlipForQuestionAnswering': ['BlipVQAModelTester'],
'BlipModel': ['BlipModelTester'],
'BlipTextModel': ['BlipTextModelTester'],
'BlipVisionModel': ['BlipVisionModelTester'],
}
self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase )
self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase )
"""simple docstring"""
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def lowercase__( __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[str]=5 ):
# Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
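    # The flow below: encode the masked sentence, locate the <mask> token, take a softmax
    # over the vocabulary logits at that position, and keep the top-k candidates.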
assert masked_input.count('<mask>' ) == 1
lowercase_ : List[Any] = torch.tensor(tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE ) ).unsqueeze(0 ) # Batch size 1
lowercase_ : Any = model(__SCREAMING_SNAKE_CASE )[0] # The last hidden-state is the first element of the output tuple
lowercase_ : int = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
lowercase_ : Tuple = logits[0, masked_index, :]
lowercase_ : Optional[int] = logits.softmax(dim=0 )
lowercase_ , lowercase_ : str = prob.topk(k=__SCREAMING_SNAKE_CASE , dim=0 )
lowercase_ : Optional[Any] = ' '.join(
[tokenizer.convert_ids_to_tokens(indices[i].item() ) for i in range(len(__SCREAMING_SNAKE_CASE ) )] )
lowercase_ : Tuple = tokenizer.mask_token
lowercase_ : List[str] = []
for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(' ' ) ):
lowercase_ : Union[str, Any] = predicted_token_bpe.replace('\u2581' , ' ' )
if " {0}".format(__SCREAMING_SNAKE_CASE ) in masked_input:
topk_filled_outputs.append(
(
masked_input.replace(' {0}'.format(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ),
values[index].item(),
predicted_token,
) )
else:
topk_filled_outputs.append(
(
masked_input.replace(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ),
values[index].item(),
predicted_token,
) )
return topk_filled_outputs
__SCREAMING_SNAKE_CASE =CamembertTokenizer.from_pretrained("camembert-base")
__SCREAMING_SNAKE_CASE =CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()
__SCREAMING_SNAKE_CASE ="Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
"""simple docstring"""
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run it, first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If the script hangs in `barrier` calls, you likely have network issues; you can try to debug them with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can also be run via `srun` in a SLURM environment. Here is a SLURM script that
# runs on 2 nodes with 4 GPUs each:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def lowercase__( *__SCREAMING_SNAKE_CASE : Tuple ):
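    # Serialize prints across ranks: an exclusive flock on this source file keeps the
    # output of many concurrent processes from interleaving on shared stdout.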
    with open(__file__ , 'r' ) as fh:
fcntl.flock(__SCREAMING_SNAKE_CASE , fcntl.LOCK_EX )
try:
print(*__SCREAMING_SNAKE_CASE )
finally:
fcntl.flock(__SCREAMING_SNAKE_CASE , fcntl.LOCK_UN )
__SCREAMING_SNAKE_CASE =int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
__SCREAMING_SNAKE_CASE =torch.device("cuda", local_rank)
__SCREAMING_SNAKE_CASE =socket.gethostname()
__SCREAMING_SNAKE_CASE =F"[{hostname}-{local_rank}]"
try:
# test distributed
dist.init_process_group("nccl")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
__SCREAMING_SNAKE_CASE =dist.get_rank()
__SCREAMING_SNAKE_CASE =dist.get_world_size()
printflock(F"{gpu} is OK (global rank: {rank}/{world_size})")
dist.barrier()
if rank == 0:
printflock(F"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}")
except Exception:
printflock(F"{gpu} is broken")
raise
"""simple docstring"""
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
__SCREAMING_SNAKE_CASE =getLogger(__name__)
__SCREAMING_SNAKE_CASE ="cuda" if torch.cuda.is_available() else "cpu"
def lowercase__( __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int = 8 , __SCREAMING_SNAKE_CASE : str = DEFAULT_DEVICE , __SCREAMING_SNAKE_CASE : str=False , __SCREAMING_SNAKE_CASE : Union[str, Any]="summarization" , __SCREAMING_SNAKE_CASE : Dict=None , **__SCREAMING_SNAKE_CASE : Dict , ):
lowercase_ : List[str] = Path(__SCREAMING_SNAKE_CASE ).open('w' , encoding='utf-8' )
lowercase_ : str = str(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = AutoModelForSeqaSeqLM.from_pretrained(__SCREAMING_SNAKE_CASE ).to(__SCREAMING_SNAKE_CASE )
if fpaa:
lowercase_ : List[Any] = model.half()
lowercase_ : Optional[Any] = AutoTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE )
logger.info(F'''Inferred tokenizer type: {tokenizer.__class__}''' ) # if this is wrong, check config.model_type.
lowercase_ : List[Any] = time.time()
# update config with task specific params
use_task_specific_params(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if prefix is None:
lowercase_ : List[Any] = prefix or getattr(model.config , 'prefix' , '' ) or ''
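    # Generate in mini-batches: each chunk is tokenized with dynamic ('longest') padding,
    # decoded, and streamed straight to the output file so large datasets don't exhaust memory.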
for examples_chunk in tqdm(list(chunks(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) ):
lowercase_ : Dict = [prefix + text for text in examples_chunk]
lowercase_ : List[Any] = tokenizer(__SCREAMING_SNAKE_CASE , return_tensors='pt' , truncation=__SCREAMING_SNAKE_CASE , padding='longest' ).to(__SCREAMING_SNAKE_CASE )
lowercase_ : List[Any] = model.generate(
input_ids=batch.input_ids , attention_mask=batch.attention_mask , **__SCREAMING_SNAKE_CASE , )
lowercase_ : Optional[int] = tokenizer.batch_decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=__SCREAMING_SNAKE_CASE )
for hypothesis in dec:
fout.write(hypothesis + '\n' )
fout.flush()
fout.close()
lowercase_ : Optional[int] = int(time.time() - start_time ) # seconds
lowercase_ : Optional[Any] = len(__SCREAMING_SNAKE_CASE )
return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs , 4 )}
def lowercase__( ):
return datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S' )
def lowercase__( __SCREAMING_SNAKE_CASE : List[Any]=True ):
lowercase_ : Any = argparse.ArgumentParser()
parser.add_argument('model_name' , type=__SCREAMING_SNAKE_CASE , help='like facebook/bart-large-cnn,t5-base, etc.' )
parser.add_argument('input_path' , type=__SCREAMING_SNAKE_CASE , help='like cnn_dm/test.source' )
parser.add_argument('save_path' , type=__SCREAMING_SNAKE_CASE , help='where to save summaries' )
parser.add_argument('--reference_path' , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help='like cnn_dm/test.target' )
parser.add_argument('--score_path' , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , default='metrics.json' , help='where to save metrics' )
parser.add_argument('--device' , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , help='cuda, cuda:1, cpu etc.' )
parser.add_argument(
        '--prefix' , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , help='will be added to the beginning of src examples' )
parser.add_argument('--task' , type=__SCREAMING_SNAKE_CASE , default='summarization' , help='used for task_specific_params + metrics' )
parser.add_argument('--bs' , type=__SCREAMING_SNAKE_CASE , default=8 , required=__SCREAMING_SNAKE_CASE , help='batch size' )
parser.add_argument(
'--n_obs' , type=__SCREAMING_SNAKE_CASE , default=-1 , required=__SCREAMING_SNAKE_CASE , help='How many observations. Defaults to all.' )
parser.add_argument('--fp16' , action='store_true' )
parser.add_argument('--dump-args' , action='store_true' , help='print the custom hparams with the results' )
parser.add_argument(
'--info' , nargs='?' , type=__SCREAMING_SNAKE_CASE , const=datetime_now() , help=(
'use in conjunction w/ --dump-args to print with the results whatever other info you\'d like, e.g.'
' lang=en-ru. If no value is passed, the current datetime string will be used.'
) , )
# Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
lowercase_ , lowercase_ : str = parser.parse_known_args()
lowercase_ : Any = parse_numeric_n_bool_cl_kwargs(__SCREAMING_SNAKE_CASE )
if parsed_args and verbose:
print(F'''parsed the following generate kwargs: {parsed_args}''' )
lowercase_ : Dict = [' ' + x.rstrip() if 't5' in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()]
if args.n_obs > 0:
lowercase_ : Any = examples[: args.n_obs]
Path(args.save_path ).parent.mkdir(exist_ok=__SCREAMING_SNAKE_CASE )
if args.reference_path is None and Path(args.score_path ).exists():
warnings.warn(F'''score_path {args.score_path} will be overwritten unless you type ctrl-c.''' )
if args.device == "cpu" and args.fpaa:
# this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
raise ValueError('Can\'t mix --fp16 and --device cpu' )
lowercase_ : Optional[int] = generate_summaries_or_translations(
        __SCREAMING_SNAKE_CASE , args.save_path , args.model_name , batch_size=args.bs , device=args.device , fpaa=args.fp16 , task=args.task , prefix=args.prefix , **__SCREAMING_SNAKE_CASE , )
if args.reference_path is None:
return {}
# Compute scores
lowercase_ : Union[str, Any] = calculate_bleu if 'translation' in args.task else calculate_rouge
lowercase_ : Union[str, Any] = [x.rstrip() for x in open(args.save_path ).readlines()]
lowercase_ : Dict = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(__SCREAMING_SNAKE_CASE )]
lowercase_ : dict = score_fn(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
scores.update(__SCREAMING_SNAKE_CASE )
if args.dump_args:
scores.update(__SCREAMING_SNAKE_CASE )
if args.info:
lowercase_ : int = args.info
if verbose:
print(__SCREAMING_SNAKE_CASE )
if args.score_path is not None:
json.dump(__SCREAMING_SNAKE_CASE , open(args.score_path , 'w' ) )
return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
"""simple docstring"""
class UpperCamelCase :
def __init__( self ,__UpperCamelCase ,__UpperCamelCase ) -> int:
'''simple docstring'''
lowercase_ : List[Any] = name
lowercase_ : int = val
def __str__( self ) -> Tuple:
'''simple docstring'''
return f'''{self.__class__.__name__}({self.name}, {self.val})'''
def __lt__( self ,__UpperCamelCase ) -> Tuple:
'''simple docstring'''
return self.val < other.val
class UpperCamelCase :
def __init__( self ,__UpperCamelCase ) -> Dict:
'''simple docstring'''
lowercase_ : Optional[int] = {}
lowercase_ : Tuple = {}
lowercase_ : Union[str, Any] = self.build_heap(__UpperCamelCase )
def __getitem__( self ,__UpperCamelCase ) -> int:
'''simple docstring'''
return self.get_value(__UpperCamelCase )
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]:
'''simple docstring'''
return (idx - 1) // 2
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[Any]:
'''simple docstring'''
return idx * 2 + 1
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Tuple:
'''simple docstring'''
return idx * 2 + 2
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[Any]:
'''simple docstring'''
return self.heap_dict[key]
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]:
'''simple docstring'''
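        # Floyd's heap construction: record each node's index and value, then sift down
        # every node from the last parent to the root, building the heap in O(n).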
lowercase_ : Optional[int] = len(__UpperCamelCase ) - 1
lowercase_ : Optional[int] = self.get_parent_idx(__UpperCamelCase )
for idx, i in enumerate(__UpperCamelCase ):
lowercase_ : Any = idx
lowercase_ : str = i.val
for i in range(__UpperCamelCase ,-1 ,-1 ):
self.sift_down(__UpperCamelCase ,__UpperCamelCase )
return array
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> Tuple:
'''simple docstring'''
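        # Percolate the node at idx down: swap it with its smaller child until both
        # children are larger (or it reaches a leaf), restoring the min-heap invariant.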
while True:
lowercase_ : List[str] = self.get_left_child_idx(__UpperCamelCase ) # noqa: E741
lowercase_ : List[str] = self.get_right_child_idx(__UpperCamelCase )
lowercase_ : List[str] = idx
if l < len(__UpperCamelCase ) and array[l] < array[idx]:
lowercase_ : List[str] = l
if r < len(__UpperCamelCase ) and array[r] < array[smallest]:
lowercase_ : Dict = r
if smallest != idx:
lowercase_ , lowercase_ : Union[str, Any] = array[smallest], array[idx]
                lowercase_ , lowercase_ : str = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
lowercase_ : Any = smallest
else:
break
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> int:
'''simple docstring'''
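        # Percolate the node at idx up: swap it with its parent while the parent's value
        # is larger, keeping idx_of_element in sync at every swap.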
lowercase_ : Dict = self.get_parent_idx(__UpperCamelCase )
while p >= 0 and self.heap[p] > self.heap[idx]:
lowercase_ , lowercase_ : Any = self.heap[idx], self.heap[p]
lowercase_ , lowercase_ : Tuple = (
self.idx_of_element[self.heap[idx]],
self.idx_of_element[self.heap[p]],
)
lowercase_ : int = p
lowercase_ : str = self.get_parent_idx(__UpperCamelCase )
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
return self.heap[0]
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ , lowercase_ : Optional[Any] = self.heap[-1], self.heap[0]
lowercase_ , lowercase_ : Tuple = (
self.idx_of_element[self.heap[-1]],
self.idx_of_element[self.heap[0]],
)
lowercase_ : Tuple = self.heap.pop()
del self.idx_of_element[x]
self.sift_down(0 ,self.heap )
return x
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Dict:
'''simple docstring'''
self.heap.append(__UpperCamelCase )
lowercase_ : Tuple = len(self.heap ) - 1
lowercase_ : Optional[int] = node.val
self.sift_up(len(self.heap ) - 1 )
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
return len(self.heap ) == 0
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> List[Any]:
'''simple docstring'''
assert (
self.heap[self.idx_of_element[node]].val > new_value
), "newValue must be less that current value"
lowercase_ : Any = new_value
lowercase_ : List[str] = new_value
self.sift_up(self.idx_of_element[node] )
__SCREAMING_SNAKE_CASE =Node("R", -1)
__SCREAMING_SNAKE_CASE =Node("B", 6)
__SCREAMING_SNAKE_CASE =Node("A", 3)
__SCREAMING_SNAKE_CASE =Node("X", 1)
__SCREAMING_SNAKE_CASE =Node("E", 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
__SCREAMING_SNAKE_CASE =MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print("Min Heap - before decrease key")
for i in my_min_heap.heap:
print(i)
print("Min Heap - After decrease key of node [B -> -17]")
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
from timeit import timeit
def lowercase__( __SCREAMING_SNAKE_CASE : int ):
if number < 0:
raise ValueError('the value of input must not be negative' )
lowercase_ : List[Any] = 0
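    # Brian Kernighan's trick: n & (n - 1) clears the lowest set bit, so the loop
    # body runs exactly once per set bit rather than once per bit position.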
while number:
number &= number - 1
result += 1
return result
def lowercase__( __SCREAMING_SNAKE_CASE : int ):
if number < 0:
raise ValueError('the value of input must not be negative' )
lowercase_ : Optional[Any] = 0
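    # Naive approach for comparison: test the least-significant bit with % 2 and
    # shift right, iterating once for every bit of the input.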
while number:
if number % 2 == 1:
result += 1
number >>= 1
return result
def lowercase__( ):
def do_benchmark(__SCREAMING_SNAKE_CASE : int ) -> None:
lowercase_ : Optional[int] = 'import __main__ as z'
print(F'''Benchmark when {number = }:''' )
print(F'''{get_set_bits_count_using_modulo_operator(__SCREAMING_SNAKE_CASE ) = }''' )
lowercase_ : Union[str, Any] = timeit('z.get_set_bits_count_using_modulo_operator(25)' , setup=__SCREAMING_SNAKE_CASE )
print(F'''timeit() runs in {timing} seconds''' )
print(F'''{get_set_bits_count_using_brian_kernighans_algorithm(__SCREAMING_SNAKE_CASE ) = }''' )
lowercase_ : Optional[Any] = timeit(
'z.get_set_bits_count_using_brian_kernighans_algorithm(25)' , setup=__SCREAMING_SNAKE_CASE , )
print(F'''timeit() runs in {timing} seconds''' )
for number in (25, 37, 58, 0):
do_benchmark(__SCREAMING_SNAKE_CASE )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class UpperCamelCase ( unittest.TestCase ):
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ : List[Any] = tempfile.mkdtemp()
# fmt: off
lowercase_ : Any = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
lowercase_ : int = dict(zip(__UpperCamelCase ,range(len(__UpperCamelCase ) ) ) )
lowercase_ : Union[str, Any] = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
lowercase_ : Tuple = {'unk_token': '<unk>'}
lowercase_ : str = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
lowercase_ : int = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp:
fp.write(json.dumps(__UpperCamelCase ) + '\n' )
with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp:
fp.write('\n'.join(__UpperCamelCase ) )
lowercase_ : Any = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.4814_5466, 0.457_8275, 0.4082_1073],
'image_std': [0.2686_2954, 0.2613_0258, 0.2757_7711],
}
lowercase_ : List[str] = os.path.join(self.tmpdirname ,__UpperCamelCase )
with open(self.image_processor_file ,'w' ,encoding='utf-8' ) as fp:
json.dump(__UpperCamelCase ,__UpperCamelCase )
def _UpperCAmelCase ( self ,**__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
return CLIPTokenizer.from_pretrained(self.tmpdirname ,**__UpperCamelCase )
def _UpperCAmelCase ( self ,**__UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
return CLIPTokenizerFast.from_pretrained(self.tmpdirname ,**__UpperCamelCase )
def _UpperCAmelCase ( self ,**__UpperCamelCase ) -> str:
'''simple docstring'''
return ViTImageProcessor.from_pretrained(self.tmpdirname ,**__UpperCamelCase )
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ : Dict = [np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uinta )]
lowercase_ : List[str] = [Image.fromarray(np.moveaxis(__UpperCamelCase ,0 ,-1 ) ) for x in image_inputs]
return image_inputs
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
lowercase_ : Optional[int] = self.get_tokenizer()
lowercase_ : List[Any] = self.get_rust_tokenizer()
lowercase_ : Tuple = self.get_image_processor()
lowercase_ : Optional[int] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase )
processor_slow.save_pretrained(self.tmpdirname )
lowercase_ : Union[str, Any] = CLIPSegProcessor.from_pretrained(self.tmpdirname ,use_fast=__UpperCamelCase )
lowercase_ : List[Any] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase )
processor_fast.save_pretrained(self.tmpdirname )
lowercase_ : str = CLIPSegProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() ,tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() ,tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() ,tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer ,__UpperCamelCase )
self.assertIsInstance(processor_fast.tokenizer ,__UpperCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() ,image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() ,image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor ,__UpperCamelCase )
self.assertIsInstance(processor_fast.image_processor ,__UpperCamelCase )
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
lowercase_ : Optional[int] = CLIPSegProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowercase_ : List[Any] = self.get_tokenizer(bos_token='(BOS)' ,eos_token='(EOS)' )
lowercase_ : Any = self.get_image_processor(do_normalize=__UpperCamelCase ,padding_value=1.0 )
lowercase_ : Any = CLIPSegProcessor.from_pretrained(
self.tmpdirname ,bos_token='(BOS)' ,eos_token='(EOS)' ,do_normalize=__UpperCamelCase ,padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer ,__UpperCamelCase )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor ,__UpperCamelCase )
def _UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
lowercase_ : Dict = self.get_image_processor()
lowercase_ : List[str] = self.get_tokenizer()
lowercase_ : List[str] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase )
lowercase_ : List[Any] = self.prepare_image_inputs()
lowercase_ : str = image_processor(__UpperCamelCase ,return_tensors='np' )
lowercase_ : Union[str, Any] = processor(images=__UpperCamelCase ,return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1e-2 )
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
lowercase_ : Dict = self.get_image_processor()
lowercase_ : List[Any] = self.get_tokenizer()
lowercase_ : List[Any] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase )
lowercase_ : Dict = 'lower newer'
lowercase_ : Any = processor(text=__UpperCamelCase )
lowercase_ : int = tokenizer(__UpperCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : str = self.get_image_processor()
lowercase_ : str = self.get_tokenizer()
lowercase_ : int = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase )
lowercase_ : List[Any] = 'lower newer'
lowercase_ : str = self.prepare_image_inputs()
lowercase_ : Optional[int] = processor(text=__UpperCamelCase ,images=__UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) ,['input_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(__UpperCamelCase ):
processor()
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
lowercase_ : Tuple = self.get_image_processor()
lowercase_ : Optional[Any] = self.get_tokenizer()
lowercase_ : List[str] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase )
lowercase_ : Optional[int] = self.prepare_image_inputs()
lowercase_ : Optional[Any] = self.prepare_image_inputs()
lowercase_ : int = processor(images=__UpperCamelCase ,visual_prompt=__UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) ,['pixel_values', 'conditional_pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(__UpperCamelCase ):
processor()
def _UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
lowercase_ : List[str] = self.get_image_processor()
lowercase_ : Optional[Any] = self.get_tokenizer()
lowercase_ : int = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase )
lowercase_ : Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowercase_ : List[str] = processor.batch_decode(__UpperCamelCase )
lowercase_ : Optional[Any] = tokenizer.batch_decode(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase ,__UpperCamelCase )
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class UpperCamelCase :
lowercase = 42
lowercase = 42
class UpperCamelCase :
def __init__( self ,__UpperCamelCase ) -> str:
'''simple docstring'''
lowercase_ : list[list[Edge]] = [[] for _ in range(__UpperCamelCase )]
lowercase_ : Optional[Any] = size
def __getitem__( self ,__UpperCamelCase ) -> Iterator[Edge]:
'''simple docstring'''
return iter(self._graph[vertex] )
@property
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
return self._size
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Any:
'''simple docstring'''
if weight not in (0, 1):
raise ValueError('Edge weight must be either 0 or 1.' )
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError('Vertex indexes must be in [0; size).' )
self._graph[from_vertex].append(Edge(__UpperCamelCase ,__UpperCamelCase ) )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> int | None:
'''simple docstring'''
lowercase_ : Union[str, Any] = deque([start_vertex] )
lowercase_ : list[int | None] = [None] * self.size
lowercase_ : Optional[Any] = 0
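        # 0-1 BFS: weight-0 edges are pushed to the front of the deque and weight-1 edges
        # to the back, so vertices are popped in non-decreasing distance order. This gives
        # Dijkstra-like correctness in O(V + E) without a priority queue.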
while queue:
lowercase_ : List[str] = queue.popleft()
lowercase_ : Optional[int] = distances[current_vertex]
if current_distance is None:
continue
for edge in self[current_vertex]:
lowercase_ : Optional[Any] = current_distance + edge.weight
lowercase_ : List[Any] = distances[edge.destination_vertex]
if (
isinstance(__UpperCamelCase ,__UpperCamelCase )
and new_distance >= dest_vertex_distance
):
continue
lowercase_ : Tuple = new_distance
if edge.weight == 0:
queue.appendleft(edge.destination_vertex )
else:
queue.append(edge.destination_vertex )
if distances[finish_vertex] is None:
raise ValueError('No path from start_vertex to finish_vertex.' )
return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
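# Standard lazy-import layout: `_import_structure` maps each submodule to its public
# names, and optional backends (vision, torch) only register their symbols when the
# corresponding dependency is importable.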
__SCREAMING_SNAKE_CASE ={
"configuration_efficientnet": [
"EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EfficientNetConfig",
"EfficientNetOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE =["EfficientNetImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE =[
"EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"EfficientNetForImageClassification",
"EfficientNetModel",
"EfficientNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE =_LazyModule(__name__, globals()["__file__"], _import_structure)
"""simple docstring"""
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class UpperCamelCase :
def __init__( self ,__UpperCamelCase ,__UpperCamelCase=13 ,__UpperCamelCase=7 ,__UpperCamelCase=True ,__UpperCamelCase=True ,__UpperCamelCase=99 ,__UpperCamelCase=32 ,__UpperCamelCase=5 ,__UpperCamelCase=4 ,__UpperCamelCase=37 ,__UpperCamelCase="gelu" ,__UpperCamelCase=0.1 ,__UpperCamelCase=0.1 ,__UpperCamelCase=50 ,__UpperCamelCase=0.02 ,__UpperCamelCase=True ,__UpperCamelCase=None ,) -> List[str]:
'''simple docstring'''
lowercase_ : Dict = parent
lowercase_ : Tuple = batch_size
lowercase_ : List[Any] = seq_length
lowercase_ : Optional[Any] = is_training
lowercase_ : Any = use_input_mask
lowercase_ : Optional[Any] = vocab_size
lowercase_ : str = hidden_size
lowercase_ : Any = num_hidden_layers
lowercase_ : Dict = num_attention_heads
lowercase_ : Optional[int] = intermediate_size
lowercase_ : Any = hidden_act
lowercase_ : Optional[Any] = hidden_dropout_prob
lowercase_ : str = attention_probs_dropout_prob
lowercase_ : Any = max_position_embeddings
lowercase_ : Optional[Any] = initializer_range
lowercase_ : Union[str, Any] = use_labels
lowercase_ : Union[str, Any] = scope
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
lowercase_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
lowercase_ : List[str] = None
if self.use_input_mask:
lowercase_ : Dict = random_attention_mask([self.batch_size, self.seq_length] )
if self.use_labels:
lowercase_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
lowercase_ : Any = self.get_config()
return config, input_ids, input_mask, token_labels
def _UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
return BertGenerationConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,is_decoder=__UpperCamelCase ,initializer_range=self.initializer_range ,)
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
        lowercase_ , lowercase_ , lowercase_ , lowercase_ : str = self.prepare_config_and_inputs()
lowercase_ : int = True
lowercase_ : Union[str, Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowercase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 )
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ,) -> Any:
'''simple docstring'''
lowercase_ : Optional[Any] = BertGenerationEncoder(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowercase_ : List[Any] = model(__UpperCamelCase ,attention_mask=__UpperCamelCase )
lowercase_ : Optional[Any] = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ,) -> Optional[Any]:
'''simple docstring'''
lowercase_ : Optional[Any] = True
lowercase_ : str = BertGenerationEncoder(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowercase_ : Union[str, Any] = model(
__UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,encoder_attention_mask=__UpperCamelCase ,)
lowercase_ : Dict = model(
__UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,)
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ,) -> int:
'''simple docstring'''
lowercase_ : List[str] = True
lowercase_ : Union[str, Any] = True
lowercase_ : int = BertGenerationDecoder(config=__UpperCamelCase ).to(__UpperCamelCase ).eval()
# first forward pass
lowercase_ : str = model(
__UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,encoder_attention_mask=__UpperCamelCase ,use_cache=__UpperCamelCase ,)
lowercase_ : Dict = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowercase_ : Union[str, Any] = ids_tensor((self.batch_size, 3) ,config.vocab_size )
lowercase_ : Dict = ids_tensor((self.batch_size, 3) ,vocab_size=2 )
# append to next input_ids and
lowercase_ : Tuple = torch.cat([input_ids, next_tokens] ,dim=-1 )
lowercase_ : Any = torch.cat([input_mask, next_mask] ,dim=-1 )
lowercase_ : int = model(
__UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,encoder_attention_mask=__UpperCamelCase ,output_hidden_states=__UpperCamelCase ,)['hidden_states'][0]
lowercase_ : List[Any] = model(
__UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,encoder_attention_mask=__UpperCamelCase ,past_key_values=__UpperCamelCase ,output_hidden_states=__UpperCamelCase ,)['hidden_states'][0]
# select random slice
lowercase_ : int = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
lowercase_ : List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
lowercase_ : int = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__UpperCamelCase ,__UpperCamelCase ,atol=1e-3 ) )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,*__UpperCamelCase ,) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : List[str] = BertGenerationDecoder(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowercase_ : Dict = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ,labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ , lowercase_ , lowercase_ , lowercase_ : Union[str, Any] = self.prepare_config_and_inputs()
lowercase_ : Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , unittest.TestCase ):
lowercase = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
lowercase = (BertGenerationDecoder,) if is_torch_available() else ()
lowercase = (
{'feature-extraction': BertGenerationEncoder, 'text-generation': BertGenerationDecoder}
if is_torch_available()
else {}
)
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
lowercase_ : Optional[Any] = BertGenerationEncoderTester(self )
lowercase_ : Tuple = ConfigTester(self ,config_class=__UpperCamelCase ,hidden_size=37 )
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
lowercase_ , lowercase_ , lowercase_ , lowercase_ : int = self.model_tester.prepare_config_and_inputs()
lowercase_ : Optional[int] = 'bert'
self.model_tester.create_and_check_model(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*__UpperCamelCase )
def _UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
lowercase_ : int = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*__UpperCamelCase )
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
        lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_decoder()
lowercase_ : Optional[Any] = None
self.model_tester.create_and_check_model_as_decoder(
__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,)
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ : str = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*__UpperCamelCase )
@slow
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
lowercase_ : int = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
self.assertIsNotNone(__UpperCamelCase )
@require_torch
class UpperCamelCase ( unittest.TestCase ):
@slow
def _UpperCAmelCase ( self ) -> int:
'''simple docstring'''
lowercase_ : Tuple = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
lowercase_ : List[Any] = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] )
with torch.no_grad():
lowercase_ : Tuple = model(__UpperCamelCase )[0]
lowercase_ : Dict = torch.Size([1, 8, 1024] )
self.assertEqual(output.shape ,__UpperCamelCase )
lowercase_ : str = torch.tensor(
[[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) )
@require_torch
class UpperCamelCase ( unittest.TestCase ):
@slow
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ : str = BertGenerationDecoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
lowercase_ : Dict = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] )
with torch.no_grad():
lowercase_ : Dict = model(__UpperCamelCase )[0]
lowercase_ : Optional[int] = torch.Size([1, 8, 5_0358] )
self.assertEqual(output.shape ,__UpperCamelCase )
lowercase_ : Dict = torch.tensor(
[[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) )
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__SCREAMING_SNAKE_CASE ={
"configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE =[
"TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimesformerModel",
"TimesformerForVideoClassification",
"TimesformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class UpperCamelCase :
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> int:
'''simple docstring'''
return None
class UpperCamelCase :
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> str:
'''simple docstring'''
return None
class UpperCamelCase ( unittest.TestCase ):
lowercase = [
# (model_name, model_kwargs)
('bert-base-cased', {}),
('gpt2', {'use_cache': False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(__UpperCamelCase ,'tf' ,12 ,**__UpperCamelCase )
@require_torch
@slow
def _UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(__UpperCamelCase ,'pt' ,12 ,**__UpperCamelCase )
@require_torch
@slow
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
from transformers import BertModel
lowercase_ : Union[str, Any] = ['[UNK]', '[SEP]', '[CLS]', '[PAD]', '[MASK]', 'some', 'other', 'words']
with NamedTemporaryFile(mode='w+t' ) as vocab_file:
vocab_file.write('\n'.join(__UpperCamelCase ) )
vocab_file.flush()
lowercase_ : List[str] = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
lowercase_ : Optional[Any] = BertModel(BertConfig(vocab_size=len(__UpperCamelCase ) ) )
model.save_pretrained(__UpperCamelCase )
self._test_export(__UpperCamelCase ,'pt' ,12 ,__UpperCamelCase )
@require_tf
@slow
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowercase_ : Optional[int] = self._test_export(__UpperCamelCase ,'tf' ,12 ,**__UpperCamelCase )
lowercase_ : int = quantize(Path(__UpperCamelCase ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(__UpperCamelCase ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
@require_torch
@slow
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowercase_ : Tuple = self._test_export(__UpperCamelCase ,'pt' ,12 ,**__UpperCamelCase )
lowercase_ : Tuple = quantize(__UpperCamelCase )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(__UpperCamelCase ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=None ,**__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
try:
# Compute path
with TemporaryDirectory() as tempdir:
lowercase_ : Dict = Path(__UpperCamelCase ).joinpath('model.onnx' )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase )
return path
except Exception as e:
self.fail(__UpperCamelCase )
@require_torch
@require_tokenizers
@slow
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
from transformers import BertModel
lowercase_ : List[Any] = BertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
lowercase_ : Union[str, Any] = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
self._test_infer_dynamic_axis(__UpperCamelCase ,__UpperCamelCase ,'pt' )
@require_tf
@require_tokenizers
@slow
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
from transformers import TFBertModel
lowercase_ : Optional[Any] = TFBertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
lowercase_ : Any = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
self._test_infer_dynamic_axis(__UpperCamelCase ,__UpperCamelCase ,'tf' )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Dict:
'''simple docstring'''
lowercase_ : Tuple = FeatureExtractionPipeline(__UpperCamelCase ,__UpperCamelCase )
lowercase_ : Dict = ['input_ids', 'token_type_ids', 'attention_mask', 'output_0', 'output_1']
lowercase_ , lowercase_ , lowercase_ , lowercase_ : Any = infer_shapes(__UpperCamelCase ,__UpperCamelCase )
# Assert all variables are present
self.assertEqual(len(__UpperCamelCase ) ,len(__UpperCamelCase ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3] ,__UpperCamelCase )
self.assertSequenceEqual(variable_names[3:] ,__UpperCamelCase )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] ,{0: 'batch', 1: 'sequence'} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes['output_0'] ,{0: 'batch', 1: 'sequence'} )
self.assertDictEqual(shapes['output_1'] ,{0: 'batch'} )
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
lowercase_ : Any = ['input_ids', 'attention_mask', 'token_type_ids']
lowercase_ : List[Any] = {'input_ids': [1, 2, 3, 4], 'attention_mask': [0, 0, 0, 0], 'token_type_ids': [1, 1, 1, 1]}
lowercase_ , lowercase_ : int = ensure_valid_input(FuncContiguousArgs() ,__UpperCamelCase ,__UpperCamelCase )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(__UpperCamelCase ) ,3 )
# Should have exactly the same input names
self.assertEqual(set(__UpperCamelCase ) ,set(__UpperCamelCase ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(__UpperCamelCase ,(tokens['input_ids'], tokens['token_type_ids'], tokens['attention_mask']) )
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
lowercase_ , lowercase_ : Optional[int] = ensure_valid_input(FuncNonContiguousArgs() ,__UpperCamelCase ,__UpperCamelCase )
# Should have exactly the one arg (all before the one not provided "some_other_args")
self.assertEqual(len(__UpperCamelCase ) ,1 )
self.assertEqual(len(__UpperCamelCase ) ,1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] ,tokens['input_ids'] )
self.assertEqual(ordered_input_names[0] ,'input_ids' )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : Dict = generate_identified_filename(Path('/home/something/my_fake_model.onnx' ) ,'-test' )
self.assertEqual('/home/something/my_fake_model-test.onnx' ,generated.as_posix() )
"""simple docstring"""
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class UpperCamelCase ( lowercase_ , unittest.TestCase ):
# FIXME: add fast tests
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class UpperCamelCase ( unittest.TestCase ):
@property
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ : Tuple = ort.SessionOptions()
lowercase_ : str = False
return options
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ : List[str] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo.png' )
lowercase_ : Dict = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo_mask.png' )
lowercase_ : str = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'runwayml/stable-diffusion-inpainting' ,revision='onnx' ,safety_checker=__UpperCamelCase ,feature_extractor=__UpperCamelCase ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
pipe.set_progress_bar_config(disable=__UpperCamelCase )
lowercase_ : Tuple = 'A red cat sitting on a park bench'
lowercase_ : int = np.random.RandomState(0 )
lowercase_ : Any = pipe(
prompt=__UpperCamelCase ,image=__UpperCamelCase ,mask_image=__UpperCamelCase ,guidance_scale=7.5 ,num_inference_steps=10 ,generator=__UpperCamelCase ,output_type='np' ,)
lowercase_ : List[Any] = output.images
lowercase_ : int = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
lowercase_ : Optional[Any] = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
lowercase_ : int = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo.png' )
lowercase_ : Tuple = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo_mask.png' )
lowercase_ : Dict = LMSDiscreteScheduler.from_pretrained(
'runwayml/stable-diffusion-inpainting' ,subfolder='scheduler' ,revision='onnx' )
lowercase_ : Any = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'runwayml/stable-diffusion-inpainting' ,revision='onnx' ,scheduler=__UpperCamelCase ,safety_checker=__UpperCamelCase ,feature_extractor=__UpperCamelCase ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
pipe.set_progress_bar_config(disable=__UpperCamelCase )
lowercase_ : Optional[Any] = 'A red cat sitting on a park bench'
lowercase_ : Optional[int] = np.random.RandomState(0 )
lowercase_ : Any = pipe(
prompt=__UpperCamelCase ,image=__UpperCamelCase ,mask_image=__UpperCamelCase ,guidance_scale=7.5 ,num_inference_steps=20 ,generator=__UpperCamelCase ,output_type='np' ,)
lowercase_ : Optional[Any] = output.images
lowercase_ : Optional[Any] = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
lowercase_ : List[Any] = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class UpperCamelCase ( unittest.TestCase ):
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
lowercase_ : Union[str, Any] = [[1, 2, 4], [1, 2, 3, 4]]
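        # A disjunctive constraint is fulfilled once ANY one of the nested candidate
        # token sequences has been generated in full.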
lowercase_ : List[Any] = DisjunctiveConstraint(__UpperCamelCase )
self.assertTrue(isinstance(dc.token_ids ,__UpperCamelCase ) )
with self.assertRaises(__UpperCamelCase ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(__UpperCamelCase ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
lowercase_ : List[Any] = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(__UpperCamelCase ):
DisjunctiveConstraint(__UpperCamelCase ) # fails here
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
lowercase_ : Optional[int] = [[1, 2, 3], [1, 2, 4]]
lowercase_ : Dict = DisjunctiveConstraint(__UpperCamelCase )
lowercase_ , lowercase_ , lowercase_ : Union[str, Any] = dc.update(1 )
lowercase_ : str = stepped is True and completed is False and reset is False
self.assertTrue(__UpperCamelCase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
lowercase_ , lowercase_ , lowercase_ : Optional[Any] = dc.update(2 )
lowercase_ : Any = stepped is True and completed is False and reset is False
self.assertTrue(__UpperCamelCase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
lowercase_ , lowercase_ , lowercase_ : Tuple = dc.update(3 )
lowercase_ : Union[str, Any] = stepped is True and completed is True and reset is False
self.assertTrue(__UpperCamelCase )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
lowercase_ : List[str] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
lowercase_ : Union[str, Any] = DisjunctiveConstraint(__UpperCamelCase )
lowercase_ , lowercase_ , lowercase_ : Optional[int] = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
lowercase_ , lowercase_ , lowercase_ : int = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
lowercase_ , lowercase_ , lowercase_ : str = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
lowercase_ , lowercase_ , lowercase_ : List[str] = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
lowercase_ , lowercase_ , lowercase_ : Optional[int] = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
lowercase_ , lowercase_ , lowercase_ : int = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
lowercase_ , lowercase_ , lowercase_ : Dict = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
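# --- Illustrative usage sketch -----------------------------------------------
# A compact walk through the update API exercised by the tests above: feed
# candidate token ids one at a time and watch (stepped, completed, reset).
# The token ids are arbitrary small ints, exactly as in the tests.
from transformers.generation import DisjunctiveConstraint

dc = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4, 5]])
for token in (1, 2, 4, 5):
    stepped, completed, reset = dc.update(token)
    print(token, stepped, completed, reset, dc.current_seq, dc.remaining())
# The last update completes the [1, 2, 4, 5] branch, so completed becomes
# True and remaining() drops to 0.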
| 321
| 1
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE ={"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
__SCREAMING_SNAKE_CASE =[
"small",
"small-base",
"medium",
"medium-base",
"intermediate",
"intermediate-base",
"large",
"large-base",
"xlarge",
"xlarge-base",
]
__SCREAMING_SNAKE_CASE ={
"vocab_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt",
"funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt",
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt",
"funnel-transformer/medium-base": (
"https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt"
),
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt",
"funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt",
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt",
"funnel-transformer/xlarge-base": (
"https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json",
"funnel-transformer/small-base": (
"https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json"
),
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json",
"funnel-transformer/medium-base": (
"https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json"
),
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json",
"funnel-transformer/large-base": (
"https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json"
),
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json",
"funnel-transformer/xlarge-base": (
"https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json"
),
},
}
__SCREAMING_SNAKE_CASE ={F"funnel-transformer/{name}": 512 for name in _model_names}
__SCREAMING_SNAKE_CASE ={F"funnel-transformer/{name}": {"do_lower_case": True} for name in _model_names}
class UpperCamelCase ( lowercase_ ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_INIT_CONFIGURATION
lowercase = FunnelTokenizer
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = 2
def __init__( self ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase=True ,__UpperCamelCase="<unk>" ,__UpperCamelCase="<sep>" ,__UpperCamelCase="<pad>" ,__UpperCamelCase="<cls>" ,__UpperCamelCase="<mask>" ,__UpperCamelCase="<s>" ,__UpperCamelCase="</s>" ,__UpperCamelCase=True ,__UpperCamelCase=True ,__UpperCamelCase=None ,__UpperCamelCase="##" ,**__UpperCamelCase ,) -> Union[str, Any]:
'''simple docstring'''
super().__init__(
__UpperCamelCase ,tokenizer_file=__UpperCamelCase ,do_lower_case=__UpperCamelCase ,unk_token=__UpperCamelCase ,sep_token=__UpperCamelCase ,pad_token=__UpperCamelCase ,cls_token=__UpperCamelCase ,mask_token=__UpperCamelCase ,bos_token=__UpperCamelCase ,eos_token=__UpperCamelCase ,clean_text=__UpperCamelCase ,tokenize_chinese_chars=__UpperCamelCase ,strip_accents=__UpperCamelCase ,wordpieces_prefix=__UpperCamelCase ,**__UpperCamelCase ,)
lowercase_ : List[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' ,__UpperCamelCase ) != do_lower_case
or normalizer_state.get('strip_accents' ,__UpperCamelCase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' ,__UpperCamelCase ) != tokenize_chinese_chars
):
lowercase_ : Union[str, Any] = getattr(__UpperCamelCase ,normalizer_state.pop('type' ) )
lowercase_ : Any = do_lower_case
lowercase_ : List[Any] = strip_accents
lowercase_ : Optional[int] = tokenize_chinese_chars
lowercase_ : List[str] = normalizer_class(**__UpperCamelCase )
lowercase_ : Dict = do_lower_case
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase=None ) -> Tuple:
'''simple docstring'''
lowercase_ : Optional[int] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_b:
output += token_ids_b + [self.sep_token_id]
return output
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> List[int]:
'''simple docstring'''
lowercase_ : Dict = [self.sep_token_id]
lowercase_ : Tuple = [self.cls_token_id]
if token_ids_b is None:
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0]
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> Tuple[str]:
'''simple docstring'''
lowercase_ : Optional[Any] = self._tokenizer.model.save(__UpperCamelCase ,name=__UpperCamelCase )
return tuple(__UpperCamelCase )
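# --- Illustrative usage sketch -----------------------------------------------
# A short check of the Funnel-specific token type layout encoded above:
# unlike BERT, the [CLS] position gets cls_token_type_id (2) while the two
# segments get 0 and 1. The model id is taken from the vocab map above.
from transformers import FunnelTokenizerFast

tok = FunnelTokenizerFast.from_pretrained("funnel-transformer/small")
enc = tok("first segment", "second segment")
print(enc["token_type_ids"])  # starts with 2 for [CLS], then 0s, then 1s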
| 321
|
"""simple docstring"""
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def lowercase__( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str ):
def get_masked_lm_array(__SCREAMING_SNAKE_CASE : str ):
lowercase_ : int = F'''masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
lowercase_ : str = tf.train.load_variable(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if "kernel" in name:
lowercase_ : List[Any] = array.transpose()
return torch.from_numpy(__SCREAMING_SNAKE_CASE )
def get_encoder_array(__SCREAMING_SNAKE_CASE : str ):
lowercase_ : Tuple = F'''encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
lowercase_ : Optional[Any] = tf.train.load_variable(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if "kernel" in name:
lowercase_ : Tuple = array.transpose()
return torch.from_numpy(__SCREAMING_SNAKE_CASE )
def get_encoder_layer_array(__SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str ):
lowercase_ : List[Any] = F'''encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
lowercase_ : List[Any] = tf.train.load_variable(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if "kernel" in name:
lowercase_ : List[str] = array.transpose()
return torch.from_numpy(__SCREAMING_SNAKE_CASE )
def get_encoder_attention_layer_array(__SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[Any] ):
lowercase_ : List[Any] = F'''encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
lowercase_ : Optional[Any] = tf.train.load_variable(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = array.reshape(__SCREAMING_SNAKE_CASE )
if "kernel" in name:
lowercase_ : List[str] = array.transpose()
return torch.from_numpy(__SCREAMING_SNAKE_CASE )
print(F'''Loading model based on config from {config_path}...''' )
lowercase_ : Any = BertConfig.from_json_file(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = BertForMaskedLM(__SCREAMING_SNAKE_CASE )
# Layers
for layer_index in range(0 , config.num_hidden_layers ):
lowercase_ : BertLayer = model.bert.encoder.layer[layer_index]
# Self-attention
lowercase_ : BertSelfAttention = layer.attention.self
lowercase_ : str = get_encoder_attention_layer_array(
__SCREAMING_SNAKE_CASE , '_query_dense/kernel' , self_attn.query.weight.data.shape )
lowercase_ : Tuple = get_encoder_attention_layer_array(
__SCREAMING_SNAKE_CASE , '_query_dense/bias' , self_attn.query.bias.data.shape )
lowercase_ : Tuple = get_encoder_attention_layer_array(
__SCREAMING_SNAKE_CASE , '_key_dense/kernel' , self_attn.key.weight.data.shape )
lowercase_ : int = get_encoder_attention_layer_array(
__SCREAMING_SNAKE_CASE , '_key_dense/bias' , self_attn.key.bias.data.shape )
lowercase_ : Dict = get_encoder_attention_layer_array(
__SCREAMING_SNAKE_CASE , '_value_dense/kernel' , self_attn.value.weight.data.shape )
lowercase_ : List[Any] = get_encoder_attention_layer_array(
__SCREAMING_SNAKE_CASE , '_value_dense/bias' , self_attn.value.bias.data.shape )
# Self-attention Output
lowercase_ : BertSelfOutput = layer.attention.output
lowercase_ : Dict = get_encoder_attention_layer_array(
__SCREAMING_SNAKE_CASE , '_output_dense/kernel' , self_output.dense.weight.data.shape )
lowercase_ : Any = get_encoder_attention_layer_array(
__SCREAMING_SNAKE_CASE , '_output_dense/bias' , self_output.dense.bias.data.shape )
lowercase_ : Tuple = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_attention_layer_norm/gamma' )
lowercase_ : Any = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_attention_layer_norm/beta' )
# Intermediate
lowercase_ : BertIntermediate = layer.intermediate
lowercase_ : Optional[Any] = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_intermediate_dense/kernel' )
lowercase_ : Optional[int] = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_intermediate_dense/bias' )
# Output
lowercase_ : BertOutput = layer.output
lowercase_ : Any = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_output_dense/kernel' )
lowercase_ : Optional[Any] = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_output_dense/bias' )
lowercase_ : List[str] = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_output_layer_norm/gamma' )
lowercase_ : int = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_output_layer_norm/beta' )
# Embeddings
lowercase_ : Optional[Any] = get_encoder_array('_position_embedding_layer/embeddings' )
lowercase_ : int = get_encoder_array('_type_embedding_layer/embeddings' )
lowercase_ : Any = get_encoder_array('_embedding_norm_layer/gamma' )
lowercase_ : Optional[Any] = get_encoder_array('_embedding_norm_layer/beta' )
# LM Head
lowercase_ : int = model.cls.predictions.transform
lowercase_ : str = get_masked_lm_array('dense/kernel' )
lowercase_ : Optional[Any] = get_masked_lm_array('dense/bias' )
lowercase_ : Optional[Any] = get_masked_lm_array('layer_norm/gamma' )
lowercase_ : Optional[int] = get_masked_lm_array('layer_norm/beta' )
lowercase_ : List[str] = get_masked_lm_array('embedding_table' )
# Pooling
lowercase_ : Optional[Any] = BertPooler(config=__SCREAMING_SNAKE_CASE )
lowercase_ : BertPooler = get_encoder_array('_pooler_layer/kernel' )
lowercase_ : BertPooler = get_encoder_array('_pooler_layer/bias' )
# Export final model
model.save_pretrained(__SCREAMING_SNAKE_CASE )
# Integration test - should load without any errors ;)
lowercase_ : Tuple = BertForMaskedLM.from_pretrained(__SCREAMING_SNAKE_CASE )
print(new_model.eval() )
print('Model conversion was done successfully!' )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
parser.add_argument(
"--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow Token Dropping checkpoint path."
)
parser.add_argument(
"--bert_config_file",
type=str,
required=True,
help="The config json file corresponding to the BERT model. This specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path",
type=str,
required=True,
help="Path to the output PyTorch model.",
)
__SCREAMING_SNAKE_CASE =parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
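# --- Illustrative invocation (hypothetical paths) -----------------------------
# The three flags below are exactly those defined by the argparse block above;
# only the script name and file paths are invented for the example.
#
#   python convert_token_dropping_checkpoint.py \
#       --tf_checkpoint_path /path/to/tf_ckpt \
#       --bert_config_file /path/to/bert_config.json \
#       --pytorch_dump_path /path/to/pytorch_model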
| 321
| 1
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def lowercase__( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str ):
lowercase_ : List[str] = list(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = list(__SCREAMING_SNAKE_CASE )
lowercase_ : Dict = 0
for i in range(len(__SCREAMING_SNAKE_CASE ) ):
if lista[i] != listb[i]:
count += 1
lowercase_ : List[Any] = '_'
if count > 1:
return False
else:
return "".join(__SCREAMING_SNAKE_CASE )
def lowercase__( __SCREAMING_SNAKE_CASE : list[str] ):
lowercase_ : List[Any] = []
while True:
lowercase_ : Optional[Any] = ['$'] * len(__SCREAMING_SNAKE_CASE )
lowercase_ : int = []
for i in range(len(__SCREAMING_SNAKE_CASE ) ):
for j in range(i + 1 , len(__SCREAMING_SNAKE_CASE ) ):
lowercase_ : Tuple = compare_string(binary[i] , binary[j] )
if k is False:
lowercase_ : List[str] = '*'
lowercase_ : Optional[Any] = '*'
temp.append('X' )
for i in range(len(__SCREAMING_SNAKE_CASE ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(__SCREAMING_SNAKE_CASE ) == 0:
return pi
lowercase_ : Optional[Any] = list(set(__SCREAMING_SNAKE_CASE ) )
def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Sequence[float] ):
lowercase_ : Any = []
for minterm in minterms:
lowercase_ : Union[str, Any] = ''
for _ in range(__SCREAMING_SNAKE_CASE ):
lowercase_ : Dict = str(minterm % 2 ) + string
minterm //= 2
temp.append(__SCREAMING_SNAKE_CASE )
return temp
def lowercase__( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int ):
lowercase_ : Dict = list(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = list(__SCREAMING_SNAKE_CASE )
lowercase_ : str = 0
for i in range(len(__SCREAMING_SNAKE_CASE ) ):
if lista[i] != listb[i]:
count_n += 1
return count_n == count
def lowercase__( __SCREAMING_SNAKE_CASE : list[list[int]] , __SCREAMING_SNAKE_CASE : list[str] ):
lowercase_ : Tuple = []
lowercase_ : Optional[Any] = [0] * len(__SCREAMING_SNAKE_CASE )
for i in range(len(chart[0] ) ):
lowercase_ : int = 0
lowercase_ : Dict = -1
for j in range(len(__SCREAMING_SNAKE_CASE ) ):
if chart[j][i] == 1:
count += 1
lowercase_ : Union[str, Any] = j
if count == 1:
lowercase_ : Dict = 1
for i in range(len(__SCREAMING_SNAKE_CASE ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(__SCREAMING_SNAKE_CASE ) ):
lowercase_ : List[str] = 0
temp.append(prime_implicants[i] )
while True:
lowercase_ : Optional[Any] = 0
lowercase_ : str = -1
lowercase_ : Any = 0
for i in range(len(__SCREAMING_SNAKE_CASE ) ):
lowercase_ : Dict = chart[i].count(1 )
if count_n > max_n:
lowercase_ : List[str] = count_n
lowercase_ : str = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(__SCREAMING_SNAKE_CASE ) ):
lowercase_ : List[Any] = 0
def lowercase__( __SCREAMING_SNAKE_CASE : list[str] , __SCREAMING_SNAKE_CASE : list[str] ):
lowercase_ : int = [[0 for x in range(len(__SCREAMING_SNAKE_CASE ) )] for x in range(len(__SCREAMING_SNAKE_CASE ) )]
for i in range(len(__SCREAMING_SNAKE_CASE ) ):
lowercase_ : str = prime_implicants[i].count('_' )
for j in range(len(__SCREAMING_SNAKE_CASE ) ):
if is_for_table(prime_implicants[i] , binary[j] , __SCREAMING_SNAKE_CASE ):
lowercase_ : Union[str, Any] = 1
return chart
def lowercase__( ):
lowercase_ : List[Any] = int(input('Enter the no. of variables\n' ) )
lowercase_ : Optional[Any] = [
float(__SCREAMING_SNAKE_CASE )
for x in input(
'Enter the decimal representation of Minterms \'Spaces Separated\'\n' ).split()
]
lowercase_ : Optional[int] = decimal_to_binary(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase_ : Union[str, Any] = check(__SCREAMING_SNAKE_CASE )
print('Prime Implicants are:' )
print(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = prime_implicant_chart(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = selection(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
print('Essential Prime Implicants are:' )
print(__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
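# --- Self-contained sketch of the merge step ----------------------------------
# A standalone version of the single-bit combination rule that compare_string
# implements above: two implicants merge only when they differ in exactly one
# position, which is replaced by '_'. Names here are illustrative only.
def merge_implicants(a, b):
    diff = [i for i in range(len(a)) if a[i] != b[i]]
    if len(diff) != 1:
        return False
    return a[: diff[0]] + "_" + a[diff[0] + 1 :]

assert merge_implicants("0110", "0100") == "01_0"
assert merge_implicants("0110", "1001") is False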
| 321
|
"""simple docstring"""
from collections import namedtuple
import requests
from lxml import html # type: ignore
__SCREAMING_SNAKE_CASE =namedtuple("covid_data", "cases deaths recovered")
def lowercase__( __SCREAMING_SNAKE_CASE : str = "https://www.worldometers.info/coronavirus/" ):
lowercase_ : Union[str, Any] = '//div[@class = "maincounter-number"]/span/text()'
return covid_data(*html.fromstring(requests.get(__SCREAMING_SNAKE_CASE ).content ).xpath(__SCREAMING_SNAKE_CASE ) )
__SCREAMING_SNAKE_CASE ="Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}"
print(fmt.format(*covid_stats()))
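# --- Offline sketch of the unpacking pattern ----------------------------------
# The xpath above returns a list of three strings, which is star-unpacked into
# the namedtuple; the sample values below are made up so no network call is
# needed.
from collections import namedtuple

covid_data = namedtuple("covid_data", "cases deaths recovered")
sample = covid_data(*["1,000,000", "10,000", "900,000"])
print(sample.cases, sample.deaths, sample.recovered)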
| 321
| 1
|
"""simple docstring"""
import pickle
import numpy as np
from matplotlib import pyplot as plt
class UpperCamelCase :
def __init__( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=0.2 ,__UpperCamelCase=0.2 ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : Optional[int] = bp_numa
lowercase_ : Dict = bp_numa
lowercase_ : Tuple = bp_numa
lowercase_ : List[Any] = conva_get[:2]
lowercase_ : int = conva_get[2]
lowercase_ : Dict = size_pa
lowercase_ : int = rate_w
lowercase_ : Union[str, Any] = rate_t
lowercase_ : Dict = [
np.mat(-1 * np.random.rand(self.conva[0] ,self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
lowercase_ : Union[str, Any] = np.mat(-1 * np.random.rand(self.num_bpa ,self.num_bpa ) + 0.5 )
lowercase_ : Union[str, Any] = np.mat(-1 * np.random.rand(self.num_bpa ,self.num_bpa ) + 0.5 )
lowercase_ : str = -2 * np.random.rand(self.conva[1] ) + 1
lowercase_ : Tuple = -2 * np.random.rand(self.num_bpa ) + 1
lowercase_ : Union[str, Any] = -2 * np.random.rand(self.num_bpa ) + 1
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
lowercase_ : int = {
'num_bp1': self.num_bpa,
'num_bp2': self.num_bpa,
'num_bp3': self.num_bpa,
'conv1': self.conva,
'step_conv1': self.step_conva,
'size_pooling1': self.size_poolinga,
'rate_weight': self.rate_weight,
'rate_thre': self.rate_thre,
'w_conv1': self.w_conva,
'wkj': self.wkj,
'vji': self.vji,
'thre_conv1': self.thre_conva,
'thre_bp2': self.thre_bpa,
'thre_bp3': self.thre_bpa,
}
with open(__UpperCamelCase ,'wb' ) as f:
pickle.dump(__UpperCamelCase ,__UpperCamelCase )
print(f'''Model saved: {save_path}''' )
@classmethod
def _UpperCAmelCase ( cls ,__UpperCamelCase ) -> List[Any]:
'''simple docstring'''
with open(__UpperCamelCase ,'rb' ) as f:
lowercase_ : Any = pickle.load(__UpperCamelCase ) # noqa: S301
lowercase_ : str = model_dic.get('conv1' )
conv_get.append(model_dic.get('step_conv1' ) )
lowercase_ : Union[str, Any] = model_dic.get('size_pooling1' )
lowercase_ : Optional[Any] = model_dic.get('num_bp1' )
lowercase_ : str = model_dic.get('num_bp2' )
lowercase_ : Optional[Any] = model_dic.get('num_bp3' )
lowercase_ : Union[str, Any] = model_dic.get('rate_weight' )
lowercase_ : Optional[int] = model_dic.get('rate_thre' )
# create model instance
lowercase_ : Any = CNN(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
# modify model parameters
lowercase_ : Optional[Any] = model_dic.get('w_conv1' )
lowercase_ : Tuple = model_dic.get('wkj' )
lowercase_ : Union[str, Any] = model_dic.get('vji' )
lowercase_ : Optional[Any] = model_dic.get('thre_conv1' )
lowercase_ : Dict = model_dic.get('thre_bp2' )
lowercase_ : Optional[int] = model_dic.get('thre_bp3' )
return conv_ins
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Any:
'''simple docstring'''
return 1 / (1 + np.exp(-1 * x ))
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
return round(__UpperCamelCase ,3 )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Tuple:
'''simple docstring'''
lowercase_ : Dict = convs[0]
lowercase_ : Any = convs[1]
lowercase_ : Optional[Any] = np.shape(__UpperCamelCase )[0]
# get the data slice of original image data, data_focus
lowercase_ : Tuple = []
for i_focus in range(0 ,size_data - size_conv + 1 ,__UpperCamelCase ):
for j_focus in range(0 ,size_data - size_conv + 1 ,__UpperCamelCase ):
lowercase_ : List[Any] = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(__UpperCamelCase )
# calculate the feature map of every single kernel and save it as a list of matrices
lowercase_ : Dict = []
lowercase_ : Dict = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(__UpperCamelCase ):
lowercase_ : Tuple = []
for i_focus in range(len(__UpperCamelCase ) ):
lowercase_ : Optional[int] = (
np.sum(np.multiply(data_focus[i_focus] ,w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(__UpperCamelCase ) )
lowercase_ : Optional[int] = np.asmatrix(__UpperCamelCase ).reshape(
__UpperCamelCase ,__UpperCamelCase )
data_featuremap.append(__UpperCamelCase )
# expand the data slice to one dimension
lowercase_ : Optional[int] = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(__UpperCamelCase ) )
lowercase_ : str = np.asarray(__UpperCamelCase )
return focus_list, data_featuremap
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase="average_pool" ) -> Tuple:
'''simple docstring'''
lowercase_ : Union[str, Any] = len(featuremaps[0] )
lowercase_ : str = int(size_map / size_pooling )
lowercase_ : Optional[int] = []
for i_map in range(len(__UpperCamelCase ) ):
lowercase_ : int = featuremaps[i_map]
lowercase_ : List[str] = []
for i_focus in range(0 ,__UpperCamelCase ,__UpperCamelCase ):
for j_focus in range(0 ,__UpperCamelCase ,__UpperCamelCase ):
lowercase_ : List[str] = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(__UpperCamelCase ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(__UpperCamelCase ) )
lowercase_ : Dict = np.asmatrix(__UpperCamelCase ).reshape(__UpperCamelCase ,__UpperCamelCase )
featuremap_pooled.append(__UpperCamelCase )
return featuremap_pooled
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Any:
'''simple docstring'''
lowercase_ : Tuple = []
for i in range(len(__UpperCamelCase ) ):
lowercase_ : Optional[Any] = np.shape(data[i] )
lowercase_ : List[str] = data[i].reshape(1 ,shapes[0] * shapes[1] )
lowercase_ : List[str] = data_listed.getA().tolist()[0]
data_expanded.extend(__UpperCamelCase )
lowercase_ : int = np.asarray(__UpperCamelCase )
return data_expanded
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> int:
'''simple docstring'''
lowercase_ : Any = np.asarray(__UpperCamelCase )
lowercase_ : Any = np.shape(__UpperCamelCase )
lowercase_ : Optional[Any] = data_mat.reshape(1 ,shapes[0] * shapes[1] )
return data_expanded
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> str:
'''simple docstring'''
lowercase_ : Any = []
lowercase_ : List[Any] = 0
for i_map in range(__UpperCamelCase ):
lowercase_ : List[str] = np.ones((size_map, size_map) )
for i in range(0 ,__UpperCamelCase ,__UpperCamelCase ):
for j in range(0 ,__UpperCamelCase ,__UpperCamelCase ):
lowercase_ : List[Any] = pd_pool[
i_pool
]
lowercase_ : Any = i_pool + 1
lowercase_ : Optional[int] = np.multiply(
__UpperCamelCase ,np.multiply(out_map[i_map] ,(1 - out_map[i_map]) ) )
pd_all.append(__UpperCamelCase )
return pd_all
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=bool ) -> Optional[int]:
'''simple docstring'''
print('----------------------Start Training-------------------------' )
print((' - - Shape: Train_Data ', np.shape(__UpperCamelCase )) )
print((' - - Shape: Teach_Data ', np.shape(__UpperCamelCase )) )
lowercase_ : int = 0
lowercase_ : Tuple = []
lowercase_ : Tuple = 1_0000
while rp < n_repeat and mse >= error_accuracy:
lowercase_ : List[str] = 0
print(f'''-------------Learning Time {rp}--------------''' )
for p in range(len(__UpperCamelCase ) ):
# print('------------Learning Image: %d--------------'%p)
lowercase_ : int = np.asmatrix(datas_train[p] )
lowercase_ : Any = np.asarray(datas_teach[p] )
lowercase_ , lowercase_ : Tuple = self.convolute(
__UpperCamelCase ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,)
lowercase_ : Any = self.pooling(__UpperCamelCase ,self.size_poolinga )
lowercase_ : Optional[int] = np.shape(__UpperCamelCase )
lowercase_ : Optional[int] = self._expand(__UpperCamelCase )
lowercase_ : int = data_bp_input
lowercase_ : Tuple = np.dot(__UpperCamelCase ,self.vji.T ) - self.thre_bpa
lowercase_ : Dict = self.sig(__UpperCamelCase )
lowercase_ : int = np.dot(__UpperCamelCase ,self.wkj.T ) - self.thre_bpa
lowercase_ : int = self.sig(__UpperCamelCase )
# --------------Model Leaning ------------------------
# calculate error and gradient---------------
lowercase_ : str = np.multiply(
(data_teach - bp_outa) ,np.multiply(__UpperCamelCase ,(1 - bp_outa) ) )
lowercase_ : Optional[int] = np.multiply(
np.dot(__UpperCamelCase ,self.wkj ) ,np.multiply(__UpperCamelCase ,(1 - bp_outa) ) )
lowercase_ : Any = np.dot(__UpperCamelCase ,self.vji )
lowercase_ : str = pd_i_all / (self.size_poolinga * self.size_poolinga)
lowercase_ : Dict = pd_conva_pooled.T.getA().tolist()
lowercase_ : List[Any] = self._calculate_gradient_from_pool(
__UpperCamelCase ,__UpperCamelCase ,shape_featuremapa[0] ,shape_featuremapa[1] ,self.size_poolinga ,)
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
lowercase_ : Optional[Any] = self._expand_mat(pd_conva_all[k_conv] )
lowercase_ : Dict = self.rate_weight * np.dot(__UpperCamelCase ,__UpperCamelCase )
lowercase_ : List[Any] = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
lowercase_ : Dict = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
lowercase_ : Optional[int] = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
lowercase_ : Any = self.vji + pd_j_all.T * bp_outa * self.rate_weight
lowercase_ : str = self.thre_bpa - pd_k_all * self.rate_thre
lowercase_ : Any = self.thre_bpa - pd_j_all * self.rate_thre
# accumulate the summed error of each single image
lowercase_ : List[Any] = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
lowercase_ : int = rp + 1
lowercase_ : Union[str, Any] = error_count / patterns
all_mse.append(__UpperCamelCase )
def draw_error():
lowercase_ : str = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(__UpperCamelCase ,'+-' )
plt.plot(__UpperCamelCase ,'r--' )
plt.xlabel('Learning Times' )
plt.ylabel('All_mse' )
plt.grid(__UpperCamelCase ,alpha=0.5 )
plt.show()
print('------------------Training Completed---------------------' )
print((' - - Training epoch: ', rp, f''' - - Mse: {mse:.6f}''') )
if draw_e:
draw_error()
return mse
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
lowercase_ : Union[str, Any] = []
print('-------------------Start Testing-------------------------' )
print((' - - Shape: Test_Data ', np.shape(__UpperCamelCase )) )
for p in range(len(__UpperCamelCase ) ):
lowercase_ : List[Any] = np.asmatrix(datas_test[p] )
lowercase_ , lowercase_ : Optional[Any] = self.convolute(
__UpperCamelCase ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,)
lowercase_ : List[Any] = self.pooling(__UpperCamelCase ,self.size_poolinga )
lowercase_ : List[str] = self._expand(__UpperCamelCase )
lowercase_ : Any = data_bp_input
lowercase_ : Optional[Any] = bp_outa * self.vji.T - self.thre_bpa
lowercase_ : str = self.sig(__UpperCamelCase )
lowercase_ : List[str] = bp_outa * self.wkj.T - self.thre_bpa
lowercase_ : Optional[int] = self.sig(__UpperCamelCase )
produce_out.extend(bp_outa.getA().tolist() )
lowercase_ : List[str] = [list(map(self.do_round ,__UpperCamelCase ) ) for each in produce_out]
return np.asarray(__UpperCamelCase )
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[Any]:
'''simple docstring'''
lowercase_ : Optional[int] = np.asmatrix(__UpperCamelCase )
lowercase_ , lowercase_ : Union[str, Any] = self.convolute(
__UpperCamelCase ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,)
lowercase_ : Optional[int] = self.pooling(__UpperCamelCase ,self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
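# --- Self-contained sketch of the average-pooling step -------------------------
# A plain-numpy version of what `pooling` above does for
# pooling_type == "average_pool"; the names here are illustrative only.
import numpy as np

def average_pool(feature_map, size):
    n = feature_map.shape[0] // size
    out = np.zeros((n, n))
    for i in range(n):
        for j in range(n):
            block = feature_map[i * size:(i + 1) * size, j * size:(j + 1) * size]
            out[i, j] = block.mean()
    return out

fm = np.arange(16, dtype=float).reshape(4, 4)
print(average_pool(fm, 2))  # 2x2 map of block averages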
| 321
|
"""simple docstring"""
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadVaProcessor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
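# --- Hypothetical usage sketch --------------------------------------------------
# The registries re-exported above are keyed by task name; "mrpc" is one
# standard GLUE key, assumed here purely for illustration.
from transformers.data.processors.glue import glue_output_modes, glue_processors

processor = glue_processors["mrpc"]()
print(glue_output_modes["mrpc"])  # "classification"
print(processor.get_labels())     # ["0", "1"]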
| 321
| 1
|
"""simple docstring"""
__SCREAMING_SNAKE_CASE =[0, 2, 4, 6, 8]
__SCREAMING_SNAKE_CASE =[1, 3, 5, 7, 9]
def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int ):
if remaining_length == 0:
if digits[0] == 0 or digits[-1] == 0:
return 0
for i in range(length // 2 - 1 , -1 , -1 ):
remainder += digits[i] + digits[length - i - 1]
if remainder % 2 == 0:
return 0
remainder //= 10
return 1
if remaining_length == 1:
if remainder % 2 == 0:
return 0
lowercase_ : Optional[int] = 0
for digit in range(10 ):
lowercase_ : List[str] = digit
result += reversible_numbers(
0 , (remainder + 2 * digit) // 10 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return result
lowercase_ : Dict = 0
for digita in range(10 ):
lowercase_ : Tuple = digita
if (remainder + digita) % 2 == 0:
lowercase_ : Dict = ODD_DIGITS
else:
lowercase_ : Any = EVEN_DIGITS
for digitb in other_parity_digits:
lowercase_ : Union[str, Any] = digitb
result += reversible_numbers(
remaining_length - 2 , (remainder + digita + digitb) // 10 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , )
return result
def lowercase__( __SCREAMING_SNAKE_CASE : int = 9 ):
lowercase_ : List[Any] = 0
for length in range(1 , max_power + 1 ):
result += reversible_numbers(__SCREAMING_SNAKE_CASE , 0 , [0] * length , __SCREAMING_SNAKE_CASE )
return result
if __name__ == "__main__":
print(F"{solution() = }")
| 321
|
"""simple docstring"""
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class UpperCamelCase :
def __init__( self ,__UpperCamelCase ,__UpperCamelCase=13 ,__UpperCamelCase=7 ,__UpperCamelCase=False ,__UpperCamelCase=True ,__UpperCamelCase=False ,__UpperCamelCase=True ,__UpperCamelCase=33 ,__UpperCamelCase=32 ,__UpperCamelCase=5 ,__UpperCamelCase=4 ,__UpperCamelCase=37 ,__UpperCamelCase="gelu" ,__UpperCamelCase=0.1 ,__UpperCamelCase=0.1 ,__UpperCamelCase=512 ,__UpperCamelCase=16 ,__UpperCamelCase=2 ,__UpperCamelCase=0.02 ,__UpperCamelCase=3 ,__UpperCamelCase=4 ,__UpperCamelCase=None ,) -> List[Any]:
'''simple docstring'''
lowercase_ : Any = parent
lowercase_ : str = batch_size
lowercase_ : List[Any] = seq_length
lowercase_ : Dict = is_training
lowercase_ : Tuple = use_input_mask
lowercase_ : Optional[Any] = use_token_type_ids
lowercase_ : List[str] = use_labels
lowercase_ : Any = vocab_size
lowercase_ : List[str] = hidden_size
lowercase_ : Optional[int] = num_hidden_layers
lowercase_ : int = num_attention_heads
lowercase_ : int = intermediate_size
lowercase_ : List[Any] = hidden_act
lowercase_ : Optional[int] = hidden_dropout_prob
lowercase_ : Tuple = attention_probs_dropout_prob
lowercase_ : Tuple = max_position_embeddings
lowercase_ : Optional[int] = type_vocab_size
lowercase_ : Optional[int] = type_sequence_label_size
lowercase_ : Dict = initializer_range
lowercase_ : int = num_labels
lowercase_ : Any = num_choices
lowercase_ : int = scope
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
lowercase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
lowercase_ : Dict = None
if self.use_input_mask:
lowercase_ : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowercase_ : Tuple = None
lowercase_ : Tuple = None
lowercase_ : Tuple = None
if self.use_labels:
lowercase_ : List[Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
lowercase_ : str = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
lowercase_ : int = ids_tensor([self.batch_size] ,self.num_choices )
lowercase_ : str = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
return EsmConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,pad_token_id=1 ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,)
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Tuple:
'''simple docstring'''
lowercase_ : List[Any] = EsmModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowercase_ : Tuple = model(__UpperCamelCase ,attention_mask=__UpperCamelCase )
lowercase_ : Union[str, Any] = model(__UpperCamelCase )
lowercase_ : int = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : Dict = EsmForMaskedLM(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowercase_ : int = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ,labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : str = self.num_labels
lowercase_ : int = EsmForTokenClassification(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowercase_ : List[Any] = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ,labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
lowercase_ : Any = self.prepare_config_and_inputs()
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ : Optional[int] = config_and_inputs
lowercase_ : Dict = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase ( lowercase_ , lowercase_ , unittest.TestCase ):
lowercase = False
lowercase = (
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
lowercase = ()
lowercase = (
{
'feature-extraction': EsmModel,
'fill-mask': EsmForMaskedLM,
'text-classification': EsmForSequenceClassification,
'token-classification': EsmForTokenClassification,
'zero-shot': EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase = True
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
lowercase_ : Dict = EsmModelTester(self )
lowercase_ : List[Any] = ConfigTester(self ,config_class=__UpperCamelCase ,hidden_size=37 )
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowercase_ : Optional[Any] = type
self.model_tester.create_and_check_model(*__UpperCamelCase )
def _UpperCAmelCase ( self ) -> int:
'''simple docstring'''
lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__UpperCamelCase )
def _UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__UpperCamelCase )
@slow
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ : List[str] = EsmModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def _UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()[0]
lowercase_ : str = EsmEmbeddings(config=__UpperCamelCase )
lowercase_ : Tuple = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
lowercase_ : List[Any] = torch.as_tensor(
[
[
0 + model.padding_idx + 1,
1 + model.padding_idx + 1,
2 + model.padding_idx + 1,
model.padding_idx,
]
] )
lowercase_ : Tuple = create_position_ids_from_input_ids(__UpperCamelCase ,model.padding_idx )
self.assertEqual(position_ids.shape ,expected_positions.shape )
self.assertTrue(torch.all(torch.eq(__UpperCamelCase ,__UpperCamelCase ) ) )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs()[0]
lowercase_ : List[Any] = EsmEmbeddings(config=__UpperCamelCase )
lowercase_ : List[Any] = torch.empty(2 ,4 ,30 )
lowercase_ : List[str] = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
lowercase_ : List[str] = torch.as_tensor([expected_single_positions, expected_single_positions] )
lowercase_ : List[str] = embeddings.create_position_ids_from_inputs_embeds(__UpperCamelCase )
self.assertEqual(position_ids.shape ,expected_positions.shape )
self.assertTrue(torch.all(torch.eq(__UpperCamelCase ,__UpperCamelCase ) ) )
@unittest.skip('Esm does not support embedding resizing' )
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
pass
@unittest.skip('Esm does not support embedding resizing' )
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
pass
@require_torch
class UpperCamelCase ( lowercase_ ):
@slow
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
with torch.no_grad():
lowercase_ : Any = EsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' )
model.eval()
lowercase_ : List[Any] = torch.tensor([[0, 1, 2, 3, 4, 5]] )
lowercase_ : List[str] = model(__UpperCamelCase )[0]
lowercase_ : Optional[int] = 33
lowercase_ : Union[str, Any] = torch.Size((1, 6, vocab_size) )
self.assertEqual(output.shape ,__UpperCamelCase )
lowercase_ : List[str] = torch.tensor(
[[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) )
@slow
def _UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
with torch.no_grad():
lowercase_ : int = EsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' )
model.eval()
lowercase_ : Tuple = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
lowercase_ : Dict = model(__UpperCamelCase )[0]
# compare the actual values for a slice.
lowercase_ : Any = torch.tensor(
[[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) )
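# --- Minimal sketch of padding-aware position ids ---------------------------------
# Reproduces the behaviour the embedding tests above check: non-pad tokens get
# consecutive positions starting at padding_idx + 1, while pad positions keep
# padding_idx. The helper name is illustrative, not the library API.
import torch

def position_ids_from_input_ids(input_ids, padding_idx):
    mask = input_ids.ne(padding_idx).int()
    return torch.cumsum(mask, dim=1) * mask + padding_idx

ids = torch.tensor([[12, 31, 13, 1]])  # assume padding_idx == 1
print(position_ids_from_input_ids(ids, 1))  # tensor([[2, 3, 4, 1]])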
| 321
| 1
|
"""simple docstring"""
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)
__SCREAMING_SNAKE_CASE ="bert-base-cased"
__SCREAMING_SNAKE_CASE ="fp16"
__SCREAMING_SNAKE_CASE ="bf16"
__SCREAMING_SNAKE_CASE =[FPaa, BFaa]
@require_fsdp
@require_cuda
class UpperCamelCase ( lowercase_ ):
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
super().setUp()
lowercase_ : str = dict(
ACCELERATE_USE_FSDP='true' ,MASTER_ADDR='localhost' ,MASTER_PORT='10999' ,RANK='0' ,LOCAL_RANK='0' ,WORLD_SIZE='1' ,)
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy
for i, strategy in enumerate(__UpperCamelCase ):
lowercase_ : Dict = self.dist_env.copy()
lowercase_ : Union[str, Any] = f'''{i + 1}'''
lowercase_ : int = strategy
with mockenv_context(**__UpperCamelCase ):
lowercase_ : int = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.sharding_strategy ,ShardingStrategy(i + 1 ) )
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch
for i, prefetch_policy in enumerate(__UpperCamelCase ):
lowercase_ : Optional[Any] = self.dist_env.copy()
lowercase_ : List[str] = prefetch_policy
with mockenv_context(**__UpperCamelCase ):
lowercase_ : List[Any] = FullyShardedDataParallelPlugin()
if prefetch_policy == "NO_PREFETCH":
self.assertIsNone(fsdp_plugin.backward_prefetch )
else:
self.assertEqual(fsdp_plugin.backward_prefetch ,BackwardPrefetch(i + 1 ) )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
for i, state_dict_type in enumerate(__UpperCamelCase ):
lowercase_ : Any = self.dist_env.copy()
lowercase_ : List[Any] = state_dict_type
with mockenv_context(**__UpperCamelCase ):
lowercase_ : Union[str, Any] = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.state_dict_type ,StateDictType(i + 1 ) )
if state_dict_type == "FULL_STATE_DICT":
self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu )
self.assertTrue(fsdp_plugin.state_dict_config.ranka_only )
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
lowercase_ : Dict = AutoModel.from_pretrained(__UpperCamelCase )
for policy in FSDP_AUTO_WRAP_POLICY:
lowercase_ : List[str] = self.dist_env.copy()
lowercase_ : List[str] = policy
if policy == "TRANSFORMER_BASED_WRAP":
lowercase_ : str = 'BertLayer'
elif policy == "SIZE_BASED_WRAP":
lowercase_ : List[str] = '2000'
with mockenv_context(**__UpperCamelCase ):
lowercase_ : Dict = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(__UpperCamelCase )
if policy == "NO_WRAP":
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
else:
self.assertIsNotNone(fsdp_plugin.auto_wrap_policy )
lowercase_ : Optional[int] = self.dist_env.copy()
lowercase_ : Optional[int] = 'TRANSFORMER_BASED_WRAP'
lowercase_ : Any = 'T5Layer'
with mockenv_context(**__UpperCamelCase ):
lowercase_ : Optional[int] = FullyShardedDataParallelPlugin()
with self.assertRaises(__UpperCamelCase ) as cm:
fsdp_plugin.set_auto_wrap_policy(__UpperCamelCase )
self.assertTrue('Could not find the transformer layer class to wrap in the model.' in str(cm.exception ) )
lowercase_ : Optional[int] = self.dist_env.copy()
lowercase_ : str = 'SIZE_BASED_WRAP'
lowercase_ : Union[str, Any] = '0'
with mockenv_context(**__UpperCamelCase ):
lowercase_ : Union[str, Any] = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(__UpperCamelCase )
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
def _UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
for mp_dtype in dtypes:
lowercase_ : List[Any] = self.dist_env.copy()
lowercase_ : Optional[Any] = mp_dtype
with mockenv_context(**__UpperCamelCase ):
lowercase_ : Any = Accelerator()
if mp_dtype == "fp16":
lowercase_ : int = torch.floataa
elif mp_dtype == "bf16":
lowercase_ : str = torch.bfloataa
lowercase_ : Optional[Any] = MixedPrecision(param_dtype=__UpperCamelCase ,reduce_dtype=__UpperCamelCase ,buffer_dtype=__UpperCamelCase )
self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy ,__UpperCamelCase )
if mp_dtype == FPaa:
self.assertTrue(isinstance(accelerator.scaler ,__UpperCamelCase ) )
elif mp_dtype == BFaa:
self.assertIsNone(accelerator.scaler )
AcceleratorState._reset_state(__UpperCamelCase )
def _UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload
for flag in [True, False]:
lowercase_ : List[str] = self.dist_env.copy()
lowercase_ : Optional[Any] = str(__UpperCamelCase ).lower()
with mockenv_context(**__UpperCamelCase ):
lowercase_ : Optional[Any] = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.cpu_offload ,CPUOffload(offload_params=__UpperCamelCase ) )
@require_fsdp
@require_multi_gpu
@slow
class UpperCamelCase ( lowercase_ ):
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
super().setUp()
lowercase_ : Union[str, Any] = 0.82
lowercase_ : List[str] = [
'fsdp_shard_grad_op_transformer_based_wrap',
'fsdp_full_shard_transformer_based_wrap',
]
lowercase_ : str = {
'multi_gpu_fp16': 3200,
'fsdp_shard_grad_op_transformer_based_wrap_fp16': 2000,
'fsdp_full_shard_transformer_based_wrap_fp16': 1900,
# Disabling below test as it overwhelms the RAM memory usage
# on CI self-hosted runner leading to tests getting killed.
# "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang
}
lowercase_ : Dict = 160
lowercase_ : Optional[Any] = 160
lowercase_ : str = inspect.getfile(accelerate.test_utils )
lowercase_ : Optional[Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'external_deps'] )
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
lowercase_ : List[str] = os.path.join(self.test_scripts_folder ,'test_performance.py' )
lowercase_ : Optional[int] = ['accelerate', 'launch', '--num_processes=2', '--num_machines=1', '--machine_rank=0', '--use_fsdp']
for config in self.performance_configs:
lowercase_ : str = cmd.copy()
for i, strategy in enumerate(__UpperCamelCase ):
if strategy.lower() in config:
cmd_config.append(f'''--fsdp_sharding_strategy={i+1}''' )
break
if "fp32" in config:
cmd_config.append('--mixed_precision=no' )
else:
cmd_config.append('--mixed_precision=fp16' )
if "cpu_offload" in config:
cmd_config.append('--fsdp_offload_params=True' )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in config:
cmd_config.append(f'''--fsdp_auto_wrap_policy={policy}''' )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append('--fsdp_transformer_layer_cls_to_wrap=BertLayer' )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append('--fsdp_min_num_params=2000' )
cmd_config.extend(
[
self.test_file_path,
f'''--output_dir={self.tmpdir}''',
f'''--performance_lower_bound={self.performance_lower_bound}''',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__UpperCamelCase ,env=os.environ.copy() )
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
lowercase_ : Dict = os.path.join(self.test_scripts_folder ,'test_checkpointing.py' )
lowercase_ : Optional[int] = [
'accelerate',
'launch',
'--num_processes=2',
'--num_machines=1',
'--machine_rank=0',
'--use_fsdp',
'--mixed_precision=fp16',
'--fsdp_transformer_layer_cls_to_wrap=BertLayer',
]
for i, strategy in enumerate(__UpperCamelCase ):
lowercase_ : Any = cmd.copy()
cmd_config.append(f'''--fsdp_sharding_strategy={i+1}''' )
if strategy != "FULL_SHARD":
continue
lowercase_ : int = len(__UpperCamelCase )
for state_dict_type in FSDP_STATE_DICT_TYPE:
lowercase_ : int = cmd_config[:state_dict_config_index]
cmd_config.append(f'''--fsdp_state_dict_type={state_dict_type}''' )
cmd_config.extend(
[
self.test_file_path,
f'''--output_dir={self.tmpdir}''',
'--partial_train_epoch=1',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__UpperCamelCase ,env=os.environ.copy() )
lowercase_ : int = cmd_config[:-1]
lowercase_ : int = os.path.join(self.tmpdir ,'epoch_0' )
cmd_config.extend(
[
f'''--resume_from_checkpoint={resume_from_checkpoint}''',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__UpperCamelCase ,env=os.environ.copy() )
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ : Union[str, Any] = os.path.join(self.test_scripts_folder ,'test_peak_memory_usage.py' )
lowercase_ : Dict = [
'accelerate',
'launch',
'--num_processes=2',
'--num_machines=1',
'--machine_rank=0',
]
for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
lowercase_ : List[Any] = cmd.copy()
if "fp16" in spec:
cmd_config.extend(['--mixed_precision=fp16'] )
else:
cmd_config.extend(['--mixed_precision=no'] )
if "multi_gpu" in spec:
continue
else:
cmd_config.extend(['--use_fsdp'] )
for i, strategy in enumerate(__UpperCamelCase ):
if strategy.lower() in spec:
cmd_config.append(f'''--fsdp_sharding_strategy={i+1}''' )
break
if "cpu_offload" in spec:
cmd_config.append('--fsdp_offload_params=True' )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in spec:
cmd_config.append(f'''--fsdp_auto_wrap_policy={policy}''' )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append('--fsdp_transformer_layer_cls_to_wrap=BertLayer' )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append('--fsdp_min_num_params=2000' )
cmd_config.extend(
[
self.test_file_path,
f'''--output_dir={self.tmpdir}''',
f'''--peak_memory_upper_bound={peak_mem_upper_bound}''',
f'''--n_train={self.n_train}''',
f'''--n_val={self.n_val}''',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__UpperCamelCase ,env=os.environ.copy() )
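# --- Illustrative launch line (hypothetical paths) ---------------------------------
# One concrete command the helpers above assemble; every flag appears in the
# lists built in this file, only the script and output paths are invented.
#
#   accelerate launch --num_processes=2 --num_machines=1 --machine_rank=0 \
#       --use_fsdp --mixed_precision=fp16 --fsdp_sharding_strategy=1 \
#       --fsdp_auto_wrap_policy=TRANSFORMER_BASED_WRAP \
#       --fsdp_transformer_layer_cls_to_wrap=BertLayer \
#       test_performance.py --output_dir=/tmp/fsdp_out --performance_lower_bound=0.82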
| 321
|
"""simple docstring"""
import pickle
import numpy as np
from matplotlib import pyplot as plt
class UpperCamelCase :
def __init__( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=0.2 ,__UpperCamelCase=0.2 ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : Optional[int] = bp_numa
lowercase_ : Dict = bp_numa
lowercase_ : Tuple = bp_numa
lowercase_ : List[Any] = conva_get[:2]
lowercase_ : int = conva_get[2]
lowercase_ : Dict = size_pa
lowercase_ : int = rate_w
lowercase_ : Union[str, Any] = rate_t
lowercase_ : Dict = [
np.mat(-1 * np.random.rand(self.conva[0] ,self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
lowercase_ : Union[str, Any] = np.mat(-1 * np.random.rand(self.num_bpa ,self.num_bpa ) + 0.5 )
lowercase_ : Union[str, Any] = np.mat(-1 * np.random.rand(self.num_bpa ,self.num_bpa ) + 0.5 )
lowercase_ : str = -2 * np.random.rand(self.conva[1] ) + 1
lowercase_ : Tuple = -2 * np.random.rand(self.num_bpa ) + 1
lowercase_ : Union[str, Any] = -2 * np.random.rand(self.num_bpa ) + 1
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
lowercase_ : int = {
'num_bp1': self.num_bpa,
'num_bp2': self.num_bpa,
'num_bp3': self.num_bpa,
'conv1': self.conva,
'step_conv1': self.step_conva,
'size_pooling1': self.size_poolinga,
'rate_weight': self.rate_weight,
'rate_thre': self.rate_thre,
'w_conv1': self.w_conva,
'wkj': self.wkj,
'vji': self.vji,
'thre_conv1': self.thre_conva,
'thre_bp2': self.thre_bpa,
'thre_bp3': self.thre_bpa,
}
with open(__UpperCamelCase ,'wb' ) as f:
pickle.dump(__UpperCamelCase ,__UpperCamelCase )
print(f'''Model saved: {save_path}''' )
@classmethod
def _UpperCAmelCase ( cls ,__UpperCamelCase ) -> List[Any]:
'''simple docstring'''
with open(__UpperCamelCase ,'rb' ) as f:
lowercase_ : Any = pickle.load(__UpperCamelCase ) # noqa: S301
lowercase_ : str = model_dic.get('conv1' )
conv_get.append(model_dic.get('step_conv1' ) )
lowercase_ : Union[str, Any] = model_dic.get('size_pooling1' )
lowercase_ : Optional[Any] = model_dic.get('num_bp1' )
lowercase_ : str = model_dic.get('num_bp2' )
lowercase_ : Optional[Any] = model_dic.get('num_bp3' )
lowercase_ : Union[str, Any] = model_dic.get('rate_weight' )
lowercase_ : Optional[int] = model_dic.get('rate_thre' )
# create model instance
lowercase_ : Any = CNN(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
# modify model parameters
lowercase_ : Optional[Any] = model_dic.get('w_conv1' )
lowercase_ : Tuple = model_dic.get('wkj' )
lowercase_ : Union[str, Any] = model_dic.get('vji' )
lowercase_ : Optional[Any] = model_dic.get('thre_conv1' )
lowercase_ : Dict = model_dic.get('thre_bp2' )
lowercase_ : Optional[int] = model_dic.get('thre_bp3' )
return conv_ins
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Any:
'''simple docstring'''
return 1 / (1 + np.exp(-1 * x ))
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
return round(__UpperCamelCase ,3 )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Tuple:
'''simple docstring'''
lowercase_ : Dict = convs[0]
lowercase_ : Any = convs[1]
lowercase_ : Optional[Any] = np.shape(__UpperCamelCase )[0]
# get the data slice of original image data, data_focus
lowercase_ : Tuple = []
for i_focus in range(0 ,size_data - size_conv + 1 ,__UpperCamelCase ):
for j_focus in range(0 ,size_data - size_conv + 1 ,__UpperCamelCase ):
lowercase_ : List[Any] = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(__UpperCamelCase )
# calculate the feature map of every single kernel, saved as a list of matrices
lowercase_ : Dict = []
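# number of valid convolution positions per side: (size_data - size_conv) / conv_step + 1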
lowercase_ : Dict = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(__UpperCamelCase ):
lowercase_ : Tuple = []
for i_focus in range(len(__UpperCamelCase ) ):
lowercase_ : Optional[int] = (
np.sum(np.multiply(data_focus[i_focus] ,w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(__UpperCamelCase ) )
lowercase_ : Optional[int] = np.asmatrix(__UpperCamelCase ).reshape(
__UpperCamelCase ,__UpperCamelCase )
data_featuremap.append(__UpperCamelCase )
# expand the data slice to one dimension
lowercase_ : Optional[int] = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(__UpperCamelCase ) )
lowercase_ : str = np.asarray(__UpperCamelCase )
return focusa_list, data_featuremap
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase="average_pool" ) -> Tuple:
'''simple docstring'''
lowercase_ : Union[str, Any] = len(featuremaps[0] )
lowercase_ : str = int(size_map / size_pooling )
lowercase_ : Optional[int] = []
for i_map in range(len(__UpperCamelCase ) ):
lowercase_ : int = featuremaps[i_map]
lowercase_ : List[str] = []
for i_focus in range(0 ,__UpperCamelCase ,__UpperCamelCase ):
for j_focus in range(0 ,__UpperCamelCase ,__UpperCamelCase ):
lowercase_ : List[str] = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(__UpperCamelCase ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(__UpperCamelCase ) )
lowercase_ : Dict = np.asmatrix(__UpperCamelCase ).reshape(__UpperCamelCase ,__UpperCamelCase )
featuremap_pooled.append(__UpperCamelCase )
return featuremap_pooled
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Any:
'''simple docstring'''
lowercase_ : Tuple = []
for i in range(len(__UpperCamelCase ) ):
lowercase_ : Optional[Any] = np.shape(data[i] )
lowercase_ : List[str] = data[i].reshape(1 ,shapes[0] * shapes[1] )
lowercase_ : List[str] = data_listed.getA().tolist()[0]
data_expanded.extend(__UpperCamelCase )
lowercase_ : int = np.asarray(__UpperCamelCase )
return data_expanded
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> int:
'''simple docstring'''
lowercase_ : Any = np.asarray(__UpperCamelCase )
lowercase_ : Any = np.shape(__UpperCamelCase )
lowercase_ : Optional[Any] = data_mat.reshape(1 ,shapes[0] * shapes[1] )
return data_expanded
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> str:
'''simple docstring'''
lowercase_ : Any = []
lowercase_ : List[Any] = 0
for i_map in range(__UpperCamelCase ):
lowercase_ : List[str] = np.ones((size_map, size_map) )
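# copy each pooled gradient back over its size_pooling x size_pooling window
# (nearest-neighbour upsampling of the pooled error map)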
for i in range(0 ,__UpperCamelCase ,__UpperCamelCase ):
for j in range(0 ,__UpperCamelCase ,__UpperCamelCase ):
lowercase_ : List[Any] = pd_pool[
i_pool
]
lowercase_ : Any = i_pool + 1
lowercase_ : Optional[int] = np.multiply(
__UpperCamelCase ,np.multiply(out_map[i_map] ,(1 - out_map[i_map]) ) )
pd_all.append(__UpperCamelCase )
return pd_all
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=bool ) -> Optional[int]:
'''simple docstring'''
print('----------------------Start Training-------------------------' )
print((' - - Shape: Train_Data ', np.shape(__UpperCamelCase )) )
print((' - - Shape: Teach_Data ', np.shape(__UpperCamelCase )) )
lowercase_ : int = 0
lowercase_ : Tuple = []
lowercase_ : Tuple = 1_0000
while rp < n_repeat and mse >= error_accuracy:
lowercase_ : List[str] = 0
print(f'''-------------Learning Time {rp}--------------''' )
for p in range(len(__UpperCamelCase ) ):
# print('------------Learning Image: %d--------------'%p)
lowercase_ : int = np.asmatrix(datas_train[p] )
lowercase_ : Any = np.asarray(datas_teach[p] )
lowercase_ , lowercase_ : Tuple = self.convolute(
__UpperCamelCase ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,)
lowercase_ : Any = self.pooling(__UpperCamelCase ,self.size_poolinga )
lowercase_ : Optional[int] = np.shape(__UpperCamelCase )
lowercase_ : Optional[int] = self._expand(__UpperCamelCase )
lowercase_ : int = data_bp_input
lowercase_ : Tuple = np.dot(__UpperCamelCase ,self.vji.T ) - self.thre_bpa
lowercase_ : Dict = self.sig(__UpperCamelCase )
lowercase_ : int = np.dot(__UpperCamelCase ,self.wkj.T ) - self.thre_bpa
lowercase_ : int = self.sig(__UpperCamelCase )
# --------------Model Learning ------------------------
# calculate error and gradient---------------
lowercase_ : str = np.multiply(
(data_teach - bp_outa) ,np.multiply(__UpperCamelCase ,(1 - bp_outa) ) )
lowercase_ : Optional[int] = np.multiply(
np.dot(__UpperCamelCase ,self.wkj ) ,np.multiply(__UpperCamelCase ,(1 - bp_outa) ) )
lowercase_ : Any = np.dot(__UpperCamelCase ,self.vji )
lowercase_ : str = pd_i_all / (self.size_poolinga * self.size_poolinga)
lowercase_ : Dict = pd_conva_pooled.T.getA().tolist()
lowercase_ : List[Any] = self._calculate_gradient_from_pool(
__UpperCamelCase ,__UpperCamelCase ,shape_featuremapa[0] ,shape_featuremapa[1] ,self.size_poolinga ,)
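# error flows output layer -> hidden layer -> pooled maps -> conv kernels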
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
lowercase_ : Optional[Any] = self._expand_mat(pd_conva_all[k_conv] )
lowercase_ : Dict = self.rate_weight * np.dot(__UpperCamelCase ,__UpperCamelCase )
lowercase_ : List[Any] = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
lowercase_ : Dict = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
lowercase_ : Optional[int] = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
lowercase_ : Any = self.vji + pd_j_all.T * bp_outa * self.rate_weight
lowercase_ : str = self.thre_bpa - pd_k_all * self.rate_thre
lowercase_ : Any = self.thre_bpa - pd_j_all * self.rate_thre
# calculate the summed error for this single image
lowercase_ : List[Any] = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
lowercase_ : int = rp + 1
lowercase_ : Union[str, Any] = error_count / patterns
all_mse.append(__UpperCamelCase )
def draw_error():
lowercase_ : str = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(__UpperCamelCase ,'+-' )
plt.plot(__UpperCamelCase ,'r--' )
plt.xlabel('Learning Times' )
plt.ylabel('All_mse' )
plt.grid(__UpperCamelCase ,alpha=0.5 )
plt.show()
print('------------------Training Completed---------------------' )
print((' - - Training epoch: ', rp, f''' - - Mse: {mse:.6f}''') )
if draw_e:
draw_error()
return mse
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
lowercase_ : Union[str, Any] = []
print('-------------------Start Testing-------------------------' )
print((' - - Shape: Test_Data ', np.shape(__UpperCamelCase )) )
for p in range(len(__UpperCamelCase ) ):
lowercase_ : List[Any] = np.asmatrix(datas_test[p] )
lowercase_ , lowercase_ : Optional[Any] = self.convolute(
__UpperCamelCase ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,)
lowercase_ : List[Any] = self.pooling(__UpperCamelCase ,self.size_poolinga )
lowercase_ : List[str] = self._expand(__UpperCamelCase )
lowercase_ : Any = data_bp_input
lowercase_ : Optional[Any] = bp_outa * self.vji.T - self.thre_bpa
lowercase_ : str = self.sig(__UpperCamelCase )
lowercase_ : List[str] = bp_outa * self.wkj.T - self.thre_bpa
lowercase_ : Optional[int] = self.sig(__UpperCamelCase )
produce_out.extend(bp_outa.getA().tolist() )
lowercase_ : List[str] = [list(map(self.do_round ,__UpperCamelCase ) ) for each in produce_out]
return np.asarray(__UpperCamelCase )
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[Any]:
'''simple docstring'''
lowercase_ : Optional[int] = np.asmatrix(__UpperCamelCase )
lowercase_ , lowercase_ : Union[str, Any] = self.convolute(
__UpperCamelCase ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,)
lowercase_ : Optional[int] = self.pooling(__UpperCamelCase ,self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
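# A minimal runnable sketch of the 2x2 average pooling performed by the class
# above (an illustration with hypothetical sizes, independent of the collapsed
# class API in this dump; only numpy is needed):
if __name__ == "__main__":
    demo = np.arange(16).reshape(4, 4)
    pooled = demo.reshape(2, 2, 2, 2).mean(axis=(1, 3))  # average over each 2x2 window
    print(pooled)  # [[ 2.5  4.5] [10.5 12.5]]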
| 321
| 1
|
"""simple docstring"""
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
__SCREAMING_SNAKE_CASE =version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def lowercase__( __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : tuple , __SCREAMING_SNAKE_CASE : Path , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Dict=False , ):
output_path.parent.mkdir(parents=__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE )
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , f=output_path.as_posix() , input_names=__SCREAMING_SNAKE_CASE , output_names=__SCREAMING_SNAKE_CASE , dynamic_axes=__SCREAMING_SNAKE_CASE , do_constant_folding=__SCREAMING_SNAKE_CASE , use_external_data_format=__SCREAMING_SNAKE_CASE , enable_onnx_checker=__SCREAMING_SNAKE_CASE , opset_version=__SCREAMING_SNAKE_CASE , )
else:
export(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , f=output_path.as_posix() , input_names=__SCREAMING_SNAKE_CASE , output_names=__SCREAMING_SNAKE_CASE , dynamic_axes=__SCREAMING_SNAKE_CASE , do_constant_folding=__SCREAMING_SNAKE_CASE , opset_version=__SCREAMING_SNAKE_CASE , )
@torch.no_grad()
def lowercase__( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : bool = False ):
lowercase_ : List[Any] = torch.floataa if fpaa else torch.floataa
if fpaa and torch.cuda.is_available():
lowercase_ : str = 'cuda'
elif fpaa and not torch.cuda.is_available():
raise ValueError('`float16` model export is only supported on GPUs with CUDA' )
else:
lowercase_ : str = 'cpu'
lowercase_ : Dict = Path(__SCREAMING_SNAKE_CASE )
# VAE DECODER
lowercase_ : Any = AutoencoderKL.from_pretrained(model_path + '/vae' )
lowercase_ : Optional[int] = vae_decoder.config.latent_channels
# forward only through the decoder part
lowercase_ : int = vae_decoder.decode
onnx_export(
__SCREAMING_SNAKE_CASE , model_args=(
torch.randn(1 , __SCREAMING_SNAKE_CASE , 25 , 25 ).to(device=__SCREAMING_SNAKE_CASE , dtype=__SCREAMING_SNAKE_CASE ),
False,
) , output_path=output_path / 'vae_decoder' / 'model.onnx' , ordered_input_names=['latent_sample', 'return_dict'] , output_names=['sample'] , dynamic_axes={
'latent_sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
} , opset=__SCREAMING_SNAKE_CASE , )
del vae_decoder
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
parser.add_argument(
"--model_path",
type=str,
required=True,
help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
)
parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--opset",
default=14,
type=int,
help="The version of the ONNX operator set to use.",
)
parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
__SCREAMING_SNAKE_CASE =parser.parse_args()
print(args.output_path)
convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
print("SD: Done: ONNX")
| 321
|
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class UpperCamelCase ( unittest.TestCase ):
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]:
'''simple docstring'''
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['bs'] ,model_result['ss'] ):
lowercase_ : Dict = model_result['result'][batch_size][sequence_length]
self.assertIsNotNone(__UpperCamelCase )
def _UpperCAmelCase ( self ) -> int:
'''simple docstring'''
lowercase_ : int = 'sshleifer/tiny-gpt2'
lowercase_ : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,eager_mode=__UpperCamelCase ,multi_process=__UpperCamelCase ,)
lowercase_ : Union[str, Any] = TensorFlowBenchmark(__UpperCamelCase )
lowercase_ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
lowercase_ : List[str] = 'sgugger/tiny-distilbert-classification'
lowercase_ : Dict = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,only_pretrain_model=__UpperCamelCase ,)
lowercase_ : int = TensorFlowBenchmark(__UpperCamelCase )
lowercase_ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ : Any = 'sshleifer/tiny-gpt2'
lowercase_ : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,)
lowercase_ : Optional[Any] = TensorFlowBenchmark(__UpperCamelCase )
lowercase_ : int = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ : Dict = 'sshleifer/tiny-gpt2'
lowercase_ : Tuple = AutoConfig.from_pretrained(__UpperCamelCase )
lowercase_ : str = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,eager_mode=__UpperCamelCase ,multi_process=__UpperCamelCase ,)
lowercase_ : str = TensorFlowBenchmark(__UpperCamelCase ,[config] )
lowercase_ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
lowercase_ : Any = 'sshleifer/tiny-gpt2'
lowercase_ : Any = AutoConfig.from_pretrained(__UpperCamelCase )
lowercase_ : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,)
lowercase_ : int = TensorFlowBenchmark(__UpperCamelCase ,[config] )
lowercase_ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : int = 'sshleifer/tiny-gpt2'
lowercase_ : List[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,)
lowercase_ : List[str] = TensorFlowBenchmark(__UpperCamelCase )
lowercase_ : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
lowercase_ : List[str] = 'sshleifer/tiny-gpt2'
lowercase_ : Optional[int] = AutoConfig.from_pretrained(__UpperCamelCase )
lowercase_ : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,)
lowercase_ : str = TensorFlowBenchmark(__UpperCamelCase ,[config] )
lowercase_ : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
lowercase_ : str = 'patrickvonplaten/t5-tiny-random'
lowercase_ : int = AutoConfig.from_pretrained(__UpperCamelCase )
lowercase_ : Optional[int] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,)
lowercase_ : List[str] = TensorFlowBenchmark(__UpperCamelCase ,configs=[config] )
lowercase_ : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('GPU' ) ) == 0 ,'Cannot do xla on CPU.' )
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ : Optional[int] = 'sshleifer/tiny-gpt2'
lowercase_ : Union[str, Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,use_xla=__UpperCamelCase ,multi_process=__UpperCamelCase ,)
lowercase_ : Union[str, Any] = TensorFlowBenchmark(__UpperCamelCase )
lowercase_ : int = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
lowercase_ : List[str] = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase_ : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,inference=__UpperCamelCase ,save_to_csv=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,inference_time_csv_file=os.path.join(__UpperCamelCase ,'inf_time.csv' ) ,inference_memory_csv_file=os.path.join(__UpperCamelCase ,'inf_mem.csv' ) ,env_info_csv_file=os.path.join(__UpperCamelCase ,'env.csv' ) ,multi_process=__UpperCamelCase ,)
lowercase_ : List[str] = TensorFlowBenchmark(__UpperCamelCase )
benchmark.run()
self.assertTrue(Path(os.path.join(__UpperCamelCase ,'inf_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(__UpperCamelCase ,'inf_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(__UpperCamelCase ,'env.csv' ) ).exists() )
def _UpperCAmelCase ( self ) -> int:
'''simple docstring'''
lowercase_ : int = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(__UpperCamelCase ):
self.assertTrue(hasattr(__UpperCamelCase ,'sequential' ) )
self.assertTrue(hasattr(__UpperCamelCase ,'cumulative' ) )
self.assertTrue(hasattr(__UpperCamelCase ,'current' ) )
self.assertTrue(hasattr(__UpperCamelCase ,'total' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase_ : Dict = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,log_filename=os.path.join(__UpperCamelCase ,'log.txt' ) ,log_print=__UpperCamelCase ,trace_memory_line_by_line=__UpperCamelCase ,eager_mode=__UpperCamelCase ,multi_process=__UpperCamelCase ,)
lowercase_ : Dict = TensorFlowBenchmark(__UpperCamelCase )
lowercase_ : Any = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(__UpperCamelCase ,'log.txt' ) ).exists() )
| 321
| 1
|
"""simple docstring"""
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE ={name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}
def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple ):
if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
raise ValueError(F'''Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.''' )
if tokenizer_name is None:
lowercase_ : str = TOKENIZER_CLASSES
else:
lowercase_ : int = {tokenizer_name: getattr(__SCREAMING_SNAKE_CASE , tokenizer_name + 'Fast' )}
logger.info(F'''Loading tokenizer classes: {tokenizer_names}''' )
for tokenizer_name in tokenizer_names:
lowercase_ : Dict = TOKENIZER_CLASSES[tokenizer_name]
lowercase_ : Union[str, Any] = True
if checkpoint_name is None:
lowercase_ : Tuple = list(tokenizer_class.max_model_input_sizes.keys() )
else:
lowercase_ : List[Any] = [checkpoint_name]
logger.info(F'''For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}''' )
for checkpoint in checkpoint_names:
logger.info(F'''Loading {tokenizer_class.__class__.__name__} {checkpoint}''' )
# Load tokenizer
lowercase_ : List[Any] = tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , force_download=__SCREAMING_SNAKE_CASE )
# Save fast tokenizer
logger.info(F'''Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}''' )
# For organization names we create sub-directories
if "/" in checkpoint:
lowercase_ , lowercase_ : List[str] = checkpoint.split('/' )
lowercase_ : Dict = os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
elif add_prefix:
lowercase_ : List[str] = checkpoint
lowercase_ : Optional[Any] = dump_path
else:
lowercase_ : int = None
lowercase_ : str = dump_path
logger.info(F'''=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}''' )
if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
lowercase_ : int = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
lowercase_ : List[str] = file_path.split(__SCREAMING_SNAKE_CASE )[-1][0]
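# a '/' right after the checkpoint name in the vocab URL means the files live
# in a sub-directory, so save there instead of using a filename prefix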
if next_char == "/":
lowercase_ : Tuple = os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = None
logger.info(F'''=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}''' )
lowercase_ : Optional[int] = tokenizer.save_pretrained(
__SCREAMING_SNAKE_CASE , legacy_format=__SCREAMING_SNAKE_CASE , filename_prefix=__SCREAMING_SNAKE_CASE )
logger.info(F'''=> File names {file_names}''' )
for file_name in file_names:
if not file_name.endswith('tokenizer.json' ):
os.remove(__SCREAMING_SNAKE_CASE )
logger.info(F'''=> removing {file_name}''' )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files."
)
parser.add_argument(
"--tokenizer_name",
default=None,
type=str,
help=(
F"Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will "
"download and convert all the checkpoints from AWS."
),
)
parser.add_argument(
"--checkpoint_name",
default=None,
type=str,
help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.",
)
parser.add_argument(
"--force_download",
action="store_true",
help="Re-download checkpoints.",
)
__SCREAMING_SNAKE_CASE =parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 321
|
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
class UpperCamelCase ( lowercase_ ):
lowercase = ['input_values', 'padding_mask']
def __init__( self ,__UpperCamelCase = 1 ,__UpperCamelCase = 2_4000 ,__UpperCamelCase = 0.0 ,__UpperCamelCase = None ,__UpperCamelCase = None ,**__UpperCamelCase ,) -> Any:
'''simple docstring'''
super().__init__(feature_size=__UpperCamelCase ,sampling_rate=__UpperCamelCase ,padding_value=__UpperCamelCase ,**__UpperCamelCase )
lowercase_ : List[str] = chunk_length_s
lowercase_ : Tuple = overlap
@property
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
if self.chunk_length_s is None or self.overlap is None:
return None
else:
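# hop between consecutive chunks: (1 - overlap) * chunk_length samples, floored, at least one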
return max(1 ,int((1.0 - self.overlap) * self.chunk_length ) )
def __call__( self ,__UpperCamelCase ,__UpperCamelCase = None ,__UpperCamelCase = False ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,) -> BatchFeature:
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
f''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'''
f''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
if padding and truncation:
raise ValueError('Both padding and truncation were set. Make sure you only set one.' )
elif padding is None:
# by default let's pad the inputs
lowercase_ : Optional[int] = True
lowercase_ : Optional[int] = bool(
isinstance(__UpperCamelCase ,(list, tuple) ) and (isinstance(raw_audio[0] ,(np.ndarray, tuple, list) )) )
if is_batched:
lowercase_ : int = [np.asarray(__UpperCamelCase ,dtype=np.floataa ).T for audio in raw_audio]
elif not is_batched and not isinstance(__UpperCamelCase ,np.ndarray ):
lowercase_ : Any = np.asarray(__UpperCamelCase ,dtype=np.floataa )
elif isinstance(__UpperCamelCase ,np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ):
lowercase_ : List[str] = raw_audio.astype(np.floataa )
# always return batch
if not is_batched:
lowercase_ : Dict = [np.asarray(__UpperCamelCase ).T]
# verify inputs are valid
for idx, example in enumerate(__UpperCamelCase ):
if example.ndim > 2:
raise ValueError(f'''Expected input shape (channels, length) but got shape {example.shape}''' )
if self.feature_size == 1 and example.ndim != 1:
raise ValueError(f'''Expected mono audio but example has {example.shape[-1]} channels''' )
if self.feature_size == 2 and example.shape[-1] != 2:
raise ValueError(f'''Expected stereo audio but example has {example.shape[-1]} channels''' )
lowercase_ : Optional[int] = None
lowercase_ : List[Any] = BatchFeature({'input_values': raw_audio} )
if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
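# chunked mode: force a total length that is a whole number of chunk strides
# (fit the shortest example when truncating, the longest when padding)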
if truncation:
lowercase_ : List[Any] = min(array.shape[0] for array in raw_audio )
lowercase_ : int = int(np.floor(max_length / self.chunk_stride ) )
lowercase_ : Dict = (nb_step - 1) * self.chunk_stride + self.chunk_length
elif padding:
lowercase_ : List[Any] = max(array.shape[0] for array in raw_audio )
lowercase_ : Tuple = int(np.ceil(max_length / self.chunk_stride ) )
lowercase_ : List[str] = (nb_step - 1) * self.chunk_stride + self.chunk_length
lowercase_ : Union[str, Any] = 'max_length'
else:
lowercase_ : int = input_values
# normal padding on batch
if padded_inputs is None:
lowercase_ : int = self.pad(
__UpperCamelCase ,max_length=__UpperCamelCase ,truncation=__UpperCamelCase ,padding=__UpperCamelCase ,return_attention_mask=__UpperCamelCase ,)
if padding:
lowercase_ : Optional[int] = padded_inputs.pop('attention_mask' )
lowercase_ : Dict = []
for example in padded_inputs.pop('input_values' ):
if self.feature_size == 1:
lowercase_ : Optional[int] = example[..., None]
input_values.append(example.T )
lowercase_ : str = input_values
if return_tensors is not None:
lowercase_ : List[Any] = padded_inputs.convert_to_tensors(__UpperCamelCase )
return padded_inputs
| 321
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__SCREAMING_SNAKE_CASE ={
"configuration_conditional_detr": [
"CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ConditionalDetrConfig",
"ConditionalDetrOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE =["ConditionalDetrFeatureExtractor"]
__SCREAMING_SNAKE_CASE =["ConditionalDetrImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE =[
"CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConditionalDetrForObjectDetection",
"ConditionalDetrForSegmentation",
"ConditionalDetrModel",
"ConditionalDetrPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 321
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__SCREAMING_SNAKE_CASE ={"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE =[
"MRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"MraForMaskedLM",
"MraForMultipleChoice",
"MraForQuestionAnswering",
"MraForSequenceClassification",
"MraForTokenClassification",
"MraLayer",
"MraModel",
"MraPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE =_LazyModule(__name__, globals()["__file__"], _import_structure)
| 321
| 1
|
"""simple docstring"""
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class UpperCamelCase :
def __init__( self ,__UpperCamelCase ,__UpperCamelCase=13 ,__UpperCamelCase=7 ,__UpperCamelCase=6 ,__UpperCamelCase=17 ,__UpperCamelCase=23 ,__UpperCamelCase=11 ,__UpperCamelCase=True ,) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : int = parent
lowercase_ : str = batch_size
lowercase_ : Any = seq_length
lowercase_ : List[str] = act_dim
lowercase_ : int = state_dim
lowercase_ : Dict = hidden_size
lowercase_ : int = max_length
lowercase_ : int = is_training
def _UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
lowercase_ : Any = floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
lowercase_ : int = floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
lowercase_ : List[str] = floats_tensor((self.batch_size, self.seq_length, 1) )
lowercase_ : Dict = floats_tensor((self.batch_size, self.seq_length, 1) )
lowercase_ : Tuple = ids_tensor((self.batch_size, self.seq_length) ,vocab_size=1000 )
lowercase_ : int = random_attention_mask((self.batch_size, self.seq_length) )
lowercase_ : Optional[int] = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
def _UpperCAmelCase ( self ) -> int:
'''simple docstring'''
return DecisionTransformerConfig(
batch_size=self.batch_size ,seq_length=self.seq_length ,act_dim=self.act_dim ,state_dim=self.state_dim ,hidden_size=self.hidden_size ,max_length=self.max_length ,)
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,) -> Any:
'''simple docstring'''
lowercase_ : List[Any] = DecisionTransformerModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowercase_ : Tuple = model(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
self.parent.assertEqual(result.state_preds.shape ,states.shape )
self.parent.assertEqual(result.action_preds.shape ,actions.shape )
self.parent.assertEqual(result.return_preds.shape ,returns_to_go.shape )
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length * 3 as there are 3 modalities: states, returns and actions
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ : Dict = self.prepare_config_and_inputs()
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ : Optional[Any] = config_and_inputs
lowercase_ : Optional[Any] = {
'states': states,
'actions': actions,
'rewards': rewards,
'returns_to_go': returns_to_go,
'timesteps': timesteps,
'attention_mask': attention_mask,
}
return config, inputs_dict
@require_torch
class UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , unittest.TestCase ):
lowercase = (DecisionTransformerModel,) if is_torch_available() else ()
lowercase = ()
lowercase = {'feature-extraction': DecisionTransformerModel} if is_torch_available() else {}
# Ignoring a failing test from GenerationTesterMixin, as the model does not use input_ids
lowercase = False
# Ignoring failing tests from ModelTesterMixin, as the model does not implement these features
lowercase = False
lowercase = False
lowercase = False
lowercase = False
lowercase = False
lowercase = False
lowercase = False
lowercase = False
lowercase = False
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
lowercase_ : Dict = DecisionTransformerModelTester(self )
lowercase_ : str = ConfigTester(self ,config_class=__UpperCamelCase ,hidden_size=37 )
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
@slow
def _UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ : Tuple = DecisionTransformerModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def _UpperCAmelCase ( self ) -> int:
'''simple docstring'''
lowercase_ , lowercase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : Tuple = model_class(__UpperCamelCase )
lowercase_ : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ : Optional[Any] = [*signature.parameters.keys()]
lowercase_ : Tuple = [
'states',
'actions',
'rewards',
'returns_to_go',
'timesteps',
'attention_mask',
]
self.assertListEqual(arg_names[: len(__UpperCamelCase )] ,__UpperCamelCase )
@require_torch
class UpperCamelCase ( unittest.TestCase ):
@slow
def _UpperCAmelCase ( self ) -> int:
'''simple docstring'''
lowercase_ : List[Any] = 2 # number of steps of autoregressive prediction we will perform
lowercase_ : Any = 10 # defined by the RL environment, may be normalized
lowercase_ : Union[str, Any] = DecisionTransformerModel.from_pretrained('edbeeching/decision-transformer-gym-hopper-expert' )
lowercase_ : List[str] = model.to(__UpperCamelCase )
lowercase_ : Any = model.config
torch.manual_seed(0 )
lowercase_ : int = torch.randn(1 ,1 ,config.state_dim ).to(device=__UpperCamelCase ,dtype=torch.floataa ) # env.reset()
lowercase_ : List[Any] = torch.tensor(
[[0.24_2793, -0.2869_3074, 0.874_2613], [0.6781_5274, -0.0810_1085, -0.1295_2147]] ,device=__UpperCamelCase )
lowercase_ : Any = torch.tensor(__UpperCamelCase ,device=__UpperCamelCase ,dtype=torch.floataa ).reshape(1 ,1 ,1 )
lowercase_ : str = state
lowercase_ : Dict = torch.zeros(1 ,0 ,config.act_dim ,device=__UpperCamelCase ,dtype=torch.floataa )
lowercase_ : Any = torch.zeros(1 ,0 ,device=__UpperCamelCase ,dtype=torch.floataa )
lowercase_ : Union[str, Any] = torch.tensor(0 ,device=__UpperCamelCase ,dtype=torch.long ).reshape(1 ,1 )
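# autoregressive rollout: append an empty action/reward slot, run the model,
# then feed the predicted action and updated return-to-go back as context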
for step in range(__UpperCamelCase ):
lowercase_ : Union[str, Any] = torch.cat([actions, torch.zeros(1 ,1 ,config.act_dim ,device=__UpperCamelCase )] ,dim=1 )
lowercase_ : Tuple = torch.cat([rewards, torch.zeros(1 ,1 ,device=__UpperCamelCase )] ,dim=1 )
lowercase_ : List[Any] = torch.ones(1 ,states.shape[1] ).to(dtype=torch.long ,device=states.device )
with torch.no_grad():
lowercase_ , lowercase_ , lowercase_ : Tuple = model(
states=__UpperCamelCase ,actions=__UpperCamelCase ,rewards=__UpperCamelCase ,returns_to_go=__UpperCamelCase ,timesteps=__UpperCamelCase ,attention_mask=__UpperCamelCase ,return_dict=__UpperCamelCase ,)
self.assertEqual(action_pred.shape ,actions.shape )
self.assertTrue(torch.allclose(action_pred[0, -1] ,expected_outputs[step] ,atol=1e-4 ) )
lowercase_ , lowercase_ , lowercase_ , lowercase_ : Optional[Any] = ( # env.step(action)
torch.randn(1 ,1 ,config.state_dim ).to(device=__UpperCamelCase ,dtype=torch.floataa ),
1.0,
False,
{},
)
lowercase_ : Optional[int] = action_pred[0, -1]
lowercase_ : Optional[Any] = torch.cat([states, state] ,dim=1 )
lowercase_ : str = returns_to_go[0, -1] - reward
lowercase_ : Optional[Any] = torch.cat([returns_to_go, pred_return.reshape(1 ,1 ,1 )] ,dim=1 )
lowercase_ : int = torch.cat(
[timesteps, torch.ones((1, 1) ,device=__UpperCamelCase ,dtype=torch.long ) * (step + 1)] ,dim=1 )
| 321
|
"""simple docstring"""
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
__SCREAMING_SNAKE_CASE ="python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("importlib_metadata")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def lowercase__( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str=None ):
require_version(deps[pkg] , __SCREAMING_SNAKE_CASE )
| 321
| 1
|
"""simple docstring"""
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE ="▁"
__SCREAMING_SNAKE_CASE ={"vocab_file": "prophetnet.tokenizer"}
__SCREAMING_SNAKE_CASE ={
"vocab_file": {
"microsoft/xprophetnet-large-wiki100-cased": (
"https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"
),
}
}
__SCREAMING_SNAKE_CASE ={
"microsoft/xprophetnet-large-wiki100-cased": {"do_lower_case": False},
}
__SCREAMING_SNAKE_CASE ={
"microsoft/xprophetnet-large-wiki100-cased": 512,
}
def lowercase__( __SCREAMING_SNAKE_CASE : Dict ):
lowercase_ : str = collections.OrderedDict()
with open(__SCREAMING_SNAKE_CASE , 'r' , encoding='utf-8' ) as reader:
lowercase_ : int = reader.readlines()
for index, token in enumerate(__SCREAMING_SNAKE_CASE ):
lowercase_ : List[str] = token.rstrip('\n' )
lowercase_ : str = index
return vocab
class UpperCamelCase ( lowercase_ ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = ['input_ids', 'attention_mask']
def __init__( self ,__UpperCamelCase ,__UpperCamelCase="[SEP]" ,__UpperCamelCase="[SEP]" ,__UpperCamelCase="[SEP]" ,__UpperCamelCase="[UNK]" ,__UpperCamelCase="[PAD]" ,__UpperCamelCase="[CLS]" ,__UpperCamelCase="[MASK]" ,__UpperCamelCase = None ,**__UpperCamelCase ,) -> None:
'''simple docstring'''
lowercase_ : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__UpperCamelCase ,eos_token=__UpperCamelCase ,sep_token=__UpperCamelCase ,unk_token=__UpperCamelCase ,pad_token=__UpperCamelCase ,cls_token=__UpperCamelCase ,mask_token=__UpperCamelCase ,sp_model_kwargs=self.sp_model_kwargs ,**__UpperCamelCase ,)
try:
import sentencepiece as spm
except ImportError:
logger.warning(
'You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece'
' pip install sentencepiece' )
raise
lowercase_ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__UpperCamelCase ) )
lowercase_ : Optional[int] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# put special tokens and [unused] tokens into the vocab
lowercase_ : Optional[Any] = {'[PAD]': 0, '[CLS]': 1, '[SEP]': 2, '[UNK]': 3, '[MASK]': 4}
for i in range(10 ):
lowercase_ : List[Any] = f'''[unused{i}]'''
lowercase_ : Any = 5 + i
# The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
lowercase_ : List[str] = 12
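# e.g. ',' has spm id 3, so its fairseq id is 3 + fairseq_offset = 3 + 12 = 15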
lowercase_ : int = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
for k in self.fairseq_tokens_to_ids.keys():
self.unique_no_split_tokens.append(__UpperCamelCase )
def __getstate__( self ) -> str:
'''simple docstring'''
lowercase_ : Optional[Any] = self.__dict__.copy()
lowercase_ : List[str] = None
return state
def __setstate__( self ,__UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : List[Any] = d
try:
import sentencepiece as spm
except ImportError:
logger.warning(
'You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece'
' pip install sentencepiece' )
raise
# for backward compatibility
if not hasattr(self ,'sp_model_kwargs' ):
lowercase_ : Union[str, Any] = {}
lowercase_ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ,__UpperCamelCase = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCamelCase ,token_ids_a=__UpperCamelCase ,already_has_special_tokens=__UpperCamelCase )
if token_ids_a is None:
return ([0] * len(__UpperCamelCase )) + [1]
return ([0] * len(__UpperCamelCase )) + [1] + ([0] * len(__UpperCamelCase )) + [1]
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> List[int]:
'''simple docstring'''
lowercase_ : Optional[int] = [self.sep_token_id]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0]
return len(token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
lowercase_ : Optional[Any] = {self.convert_ids_to_tokens(__UpperCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> str:
'''simple docstring'''
return self.sp_model.encode(__UpperCamelCase ,out_type=__UpperCamelCase )
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> str:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowercase_ : int = self.sp_model.PieceToId(__UpperCamelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Any:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : Union[str, Any] = ''.join(__UpperCamelCase ).replace(__UpperCamelCase ,' ' ).strip()
return out_string
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(__UpperCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowercase_ : Union[str, Any] = os.path.join(
__UpperCamelCase ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,__UpperCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCamelCase ,'wb' ) as fi:
lowercase_ : Tuple = self.sp_model.serialized_model_proto()
fi.write(__UpperCamelCase )
return (out_vocab_file,)
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return token_ids_a + [self.sep_token_id]
lowercase_ : List[str] = [self.sep_token_id]
return token_ids_a + sep + token_ids_a + sep
| 321
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
def lowercase__( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Any=False , __SCREAMING_SNAKE_CASE : Dict=False ):
lowercase_ : int = 'backbone.' if is_semantic else ''
lowercase_ : List[str] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''{prefix}blocks.{i}.norm1.weight''', F'''beit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''{prefix}blocks.{i}.norm1.bias''', F'''beit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(F'''{prefix}blocks.{i}.attn.proj.weight''', F'''beit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(F'''{prefix}blocks.{i}.attn.proj.bias''', F'''beit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''{prefix}blocks.{i}.norm2.weight''', F'''beit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''{prefix}blocks.{i}.norm2.bias''', F'''beit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc1.weight''', F'''beit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc1.bias''', F'''beit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc2.weight''', F'''beit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc2.bias''', F'''beit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
(F'''{prefix}cls_token''', 'beit.embeddings.cls_token'),
(F'''{prefix}patch_embed.proj.weight''', 'beit.embeddings.patch_embeddings.projection.weight'),
(F'''{prefix}patch_embed.proj.bias''', 'beit.embeddings.patch_embeddings.projection.bias'),
(F'''{prefix}pos_embed''', 'beit.embeddings.position_embeddings'),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
('mask_token', 'beit.embeddings.mask_token'),
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
('fc_norm.weight', 'beit.pooler.layernorm.weight'),
('fc_norm.bias', 'beit.pooler.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[str]=False , __SCREAMING_SNAKE_CASE : List[Any]=False ):
for i in range(config.num_hidden_layers ):
lowercase_ : Any = 'backbone.' if is_semantic else ''
# queries, keys and values
lowercase_ : List[Any] = state_dict.pop(F'''{prefix}blocks.{i}.attn.qkv.weight''' )
lowercase_ : List[Any] = state_dict.pop(F'''{prefix}blocks.{i}.attn.q_bias''' )
lowercase_ : int = state_dict.pop(F'''{prefix}blocks.{i}.attn.v_bias''' )
lowercase_ : List[str] = in_proj_weight[
: config.hidden_size, :
]
lowercase_ : List[str] = q_bias
lowercase_ : List[str] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowercase_ : Any = in_proj_weight[
-config.hidden_size :, :
]
lowercase_ : Any = v_bias
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
lowercase_ : Any = state_dict.pop(F'''{prefix}blocks.{i}.gamma_1''' )
lowercase_ : int = state_dict.pop(F'''{prefix}blocks.{i}.gamma_2''' )
lowercase_ : Tuple = gamma_a
lowercase_ : List[Any] = gamma_a
def lowercase__( __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any ):
lowercase_ : List[Any] = dct.pop(__SCREAMING_SNAKE_CASE )
lowercase_ : Any = val
def lowercase__( ):
lowercase_ : List[str] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
lowercase_ : Any = Image.open(requests.get(__SCREAMING_SNAKE_CASE , stream=__SCREAMING_SNAKE_CASE ).raw )
return im
@torch.no_grad()
def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Any=False ):
lowercase_ : List[str] = False if 'rvlcdip' in checkpoint_url else True
lowercase_ : Dict = BeitConfig(use_absolute_position_embeddings=__SCREAMING_SNAKE_CASE , use_mask_token=__SCREAMING_SNAKE_CASE )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
lowercase_ : Any = 10_24
lowercase_ : List[str] = 40_96
lowercase_ : Tuple = 24
lowercase_ : Union[str, Any] = 16
# labels
if "rvlcdip" in checkpoint_url:
lowercase_ : Optional[Any] = 16
lowercase_ : Any = 'huggingface/label-files'
lowercase_ : int = 'rvlcdip-id2label.json'
lowercase_ : Optional[int] = json.load(open(hf_hub_download(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) )
lowercase_ : Dict = {int(__SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
lowercase_ : str = idalabel
lowercase_ : str = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
lowercase_ : Dict = torch.hub.load_state_dict_from_url(__SCREAMING_SNAKE_CASE , map_location='cpu' )['model']
lowercase_ : Optional[Any] = create_rename_keys(__SCREAMING_SNAKE_CASE , has_lm_head=__SCREAMING_SNAKE_CASE )
for src, dest in rename_keys:
rename_key(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
read_in_q_k_v(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , has_lm_head=__SCREAMING_SNAKE_CASE )
# load HuggingFace model
lowercase_ : Optional[int] = BeitForMaskedImageModeling(__SCREAMING_SNAKE_CASE ) if has_lm_head else BeitForImageClassification(__SCREAMING_SNAKE_CASE )
model.eval()
model.load_state_dict(__SCREAMING_SNAKE_CASE )
# Check outputs on an image
lowercase_ : List[Any] = BeitImageProcessor(
size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=__SCREAMING_SNAKE_CASE )
lowercase_ : str = prepare_img()
lowercase_ : Optional[Any] = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='pt' )
lowercase_ : int = encoding['pixel_values']
lowercase_ : Any = model(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = outputs.logits
# verify logits
lowercase_ : Optional[Any] = [1, 16] if 'rvlcdip' in checkpoint_url else [1, 1_96, 81_92]
assert logits.shape == torch.Size(__SCREAMING_SNAKE_CASE ), "Shape of logits not as expected"
Path(__SCREAMING_SNAKE_CASE ).mkdir(exist_ok=__SCREAMING_SNAKE_CASE )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(__SCREAMING_SNAKE_CASE )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__SCREAMING_SNAKE_CASE )
if push_to_hub:
if has_lm_head:
lowercase_ : List[str] = 'dit-base' if 'base' in checkpoint_url else 'dit-large'
else:
lowercase_ : List[str] = 'dit-base-finetuned-rvlcdip' if 'dit-b' in checkpoint_url else 'dit-large-finetuned-rvlcdip'
image_processor.push_to_hub(
repo_path_or_name=Path(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=__SCREAMING_SNAKE_CASE , )
model.push_to_hub(
repo_path_or_name=Path(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=__SCREAMING_SNAKE_CASE , )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
type=str,
help="URL to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
__SCREAMING_SNAKE_CASE =parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 321
| 1
|
"""simple docstring"""
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class UpperCamelCase :
def __init__( self ,__UpperCamelCase ,__UpperCamelCase=13 ,__UpperCamelCase=7 ,__UpperCamelCase=True ,__UpperCamelCase=True ,__UpperCamelCase=99 ,__UpperCamelCase=32 ,__UpperCamelCase=5 ,__UpperCamelCase=4 ,__UpperCamelCase=37 ,__UpperCamelCase="gelu" ,__UpperCamelCase=0.1 ,__UpperCamelCase=0.1 ,__UpperCamelCase=50 ,__UpperCamelCase=0.02 ,__UpperCamelCase=True ,__UpperCamelCase=None ,) -> List[str]:
'''simple docstring'''
lowercase_ : Dict = parent
lowercase_ : Tuple = batch_size
lowercase_ : List[Any] = seq_length
lowercase_ : Optional[Any] = is_training
lowercase_ : Any = use_input_mask
lowercase_ : Optional[Any] = vocab_size
lowercase_ : str = hidden_size
lowercase_ : Any = num_hidden_layers
lowercase_ : Dict = num_attention_heads
lowercase_ : Optional[int] = intermediate_size
lowercase_ : Any = hidden_act
lowercase_ : Optional[Any] = hidden_dropout_prob
lowercase_ : str = attention_probs_dropout_prob
lowercase_ : Any = max_position_embeddings
lowercase_ : Optional[Any] = initializer_range
lowercase_ : Union[str, Any] = use_labels
lowercase_ : Union[str, Any] = scope
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
lowercase_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
lowercase_ : List[str] = None
if self.use_input_mask:
lowercase_ : Dict = random_attention_mask([self.batch_size, self.seq_length] )
if self.use_labels:
lowercase_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
lowercase_ : Any = self.get_config()
return config, input_ids, input_mask, token_labels
def _UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
return BertGenerationConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,is_decoder=__UpperCamelCase ,initializer_range=self.initializer_range ,)
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
        lowercase_ , lowercase_ , lowercase_ , lowercase_ : str = self.prepare_config_and_inputs()
lowercase_ : int = True
lowercase_ : Union[str, Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowercase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 )
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ,) -> Any:
'''simple docstring'''
lowercase_ : Optional[Any] = BertGenerationEncoder(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowercase_ : List[Any] = model(__UpperCamelCase ,attention_mask=__UpperCamelCase )
lowercase_ : Optional[Any] = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ,) -> Optional[Any]:
'''simple docstring'''
lowercase_ : Optional[Any] = True
lowercase_ : str = BertGenerationEncoder(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowercase_ : Union[str, Any] = model(
__UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,encoder_attention_mask=__UpperCamelCase ,)
lowercase_ : Dict = model(
__UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,)
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ,) -> int:
'''simple docstring'''
lowercase_ : List[str] = True
lowercase_ : Union[str, Any] = True
lowercase_ : int = BertGenerationDecoder(config=__UpperCamelCase ).to(__UpperCamelCase ).eval()
# first forward pass
lowercase_ : str = model(
__UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,encoder_attention_mask=__UpperCamelCase ,use_cache=__UpperCamelCase ,)
lowercase_ : Dict = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowercase_ : Union[str, Any] = ids_tensor((self.batch_size, 3) ,config.vocab_size )
lowercase_ : Dict = ids_tensor((self.batch_size, 3) ,vocab_size=2 )
# append to next input_ids and
lowercase_ : Tuple = torch.cat([input_ids, next_tokens] ,dim=-1 )
lowercase_ : Any = torch.cat([input_mask, next_mask] ,dim=-1 )
lowercase_ : int = model(
__UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,encoder_attention_mask=__UpperCamelCase ,output_hidden_states=__UpperCamelCase ,)['hidden_states'][0]
lowercase_ : List[Any] = model(
__UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,encoder_attention_mask=__UpperCamelCase ,past_key_values=__UpperCamelCase ,output_hidden_states=__UpperCamelCase ,)['hidden_states'][0]
# select random slice
lowercase_ : int = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
lowercase_ : List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
lowercase_ : int = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__UpperCamelCase ,__UpperCamelCase ,atol=1e-3 ) )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,*__UpperCamelCase ,) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : List[str] = BertGenerationDecoder(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowercase_ : Dict = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ,labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ , lowercase_ , lowercase_ , lowercase_ : Union[str, Any] = self.prepare_config_and_inputs()
lowercase_ : Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , unittest.TestCase ):
lowercase = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
lowercase = (BertGenerationDecoder,) if is_torch_available() else ()
lowercase = (
{'feature-extraction': BertGenerationEncoder, 'text-generation': BertGenerationDecoder}
if is_torch_available()
else {}
)
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
lowercase_ : Optional[Any] = BertGenerationEncoderTester(self )
lowercase_ : Tuple = ConfigTester(self ,config_class=__UpperCamelCase ,hidden_size=37 )
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
lowercase_ , lowercase_ , lowercase_ , lowercase_ : int = self.model_tester.prepare_config_and_inputs()
lowercase_ : Optional[int] = 'bert'
self.model_tester.create_and_check_model(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*__UpperCamelCase )
def _UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
lowercase_ : int = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*__UpperCamelCase )
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
        lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_decoder()
lowercase_ : Optional[Any] = None
self.model_tester.create_and_check_model_as_decoder(
__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,)
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ : str = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*__UpperCamelCase )
@slow
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
lowercase_ : int = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
self.assertIsNotNone(__UpperCamelCase )
@require_torch
class UpperCamelCase ( unittest.TestCase ):
@slow
def _UpperCAmelCase ( self ) -> int:
'''simple docstring'''
lowercase_ : Tuple = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
lowercase_ : List[Any] = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] )
with torch.no_grad():
lowercase_ : Tuple = model(__UpperCamelCase )[0]
lowercase_ : Dict = torch.Size([1, 8, 1024] )
self.assertEqual(output.shape ,__UpperCamelCase )
lowercase_ : str = torch.tensor(
[[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) )
@require_torch
class UpperCamelCase ( unittest.TestCase ):
@slow
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ : str = BertGenerationDecoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
lowercase_ : Dict = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] )
with torch.no_grad():
lowercase_ : Dict = model(__UpperCamelCase )[0]
lowercase_ : Optional[int] = torch.Size([1, 8, 5_0358] )
self.assertEqual(output.shape ,__UpperCamelCase )
lowercase_ : Dict = torch.tensor(
[[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) )
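# Hedged usage sketch for the checkpoint exercised in the slow tests above
# (needs Hub access; the tokenizer class is an assumption based on the model family):
#
#   from transformers import BertGenerationDecoder, BertGenerationTokenizer
#   tok = BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
#   model = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
#   logits = model(**tok("hello, my dog is", return_tensors="pt")).logits  # (1, seq_len, 50358)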
| 321
|
"""simple docstring"""
__SCREAMING_SNAKE_CASE ={
"a": "AAAAA",
"b": "AAAAB",
"c": "AAABA",
"d": "AAABB",
"e": "AABAA",
"f": "AABAB",
"g": "AABBA",
"h": "AABBB",
"i": "ABAAA",
"j": "BBBAA",
"k": "ABAAB",
"l": "ABABA",
"m": "ABABB",
"n": "ABBAA",
"o": "ABBAB",
"p": "ABBBA",
"q": "ABBBB",
"r": "BAAAA",
"s": "BAAAB",
"t": "BAABA",
"u": "BAABB",
"v": "BBBAB",
"w": "BABAA",
"x": "BABAB",
"y": "BABBA",
"z": "BABBB",
" ": " ",
}
__SCREAMING_SNAKE_CASE ={value: key for key, value in encode_dict.items()}
def lowercase__( __SCREAMING_SNAKE_CASE : str ):
lowercase_ : Union[str, Any] = ''
for letter in word.lower():
if letter.isalpha() or letter == " ":
encoded += encode_dict[letter]
else:
raise Exception('encode() accepts only letters of the alphabet and spaces' )
return encoded
def lowercase__( __SCREAMING_SNAKE_CASE : str ):
if set(__SCREAMING_SNAKE_CASE ) - {"A", "B", " "} != set():
raise Exception('decode() accepts only \'A\', \'B\' and spaces' )
lowercase_ : Dict = ''
for word in coded.split():
while len(__SCREAMING_SNAKE_CASE ) != 0:
decoded += decode_dict[word[:5]]
lowercase_ : Any = word[5:]
decoded += " "
return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
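    # Hedged, self-contained round-trip sketch of the Baconian scheme above: every
    # letter maps to five A/B symbols and decoding consumes five at a time. The
    # table here is rebuilt from a plain 2^5 enumeration for brevity, so a few
    # letters differ from the hand-tuned table above; illustrative only.
    import string

    _enc = {
        c: format(i, "05b").replace("0", "A").replace("1", "B")
        for i, c in enumerate(string.ascii_lowercase)
    }
    _enc[" "] = " "
    _dec = {v: k for k, v in _enc.items()}
    _word = "hello world"
    _coded = "".join(_enc[c] for c in _word)
    _plain = " ".join(
        "".join(_dec[chunk[i : i + 5]] for i in range(0, len(chunk), 5))
        for chunk in _coded.split()
    )
    assert _plain == _word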
| 321
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__SCREAMING_SNAKE_CASE ={
"configuration_clap": [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapAudioConfig",
"ClapConfig",
"ClapTextConfig",
],
"processing_clap": ["ClapProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE =[
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapModel",
"ClapPreTrainedModel",
"ClapTextModel",
"ClapTextModelWithProjection",
"ClapAudioModel",
"ClapAudioModelWithProjection",
]
__SCREAMING_SNAKE_CASE =["ClapFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
__SCREAMING_SNAKE_CASE =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
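# Hedged, self-contained sketch of the lazy-import pattern used above: names are
# declared up front, but the underlying import only happens on first attribute
# access (PEP 562 module-level __getattr__). Module and attribute names below are
# illustrative, not CLAP's.
import importlib

_lazy_structure = {"math": ["sqrt", "pi"], "json": ["dumps"]}

def __getattr__(name):
    for module_name, exported in _lazy_structure.items():
        if name in exported:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")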
| 321
|
"""simple docstring"""
def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int ):
def count_of_possible_combinations(__SCREAMING_SNAKE_CASE : int ) -> int:
if target < 0:
return 0
if target == 0:
return 1
return sum(count_of_possible_combinations(target - item ) for item in array )
return count_of_possible_combinations(__SCREAMING_SNAKE_CASE )
def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int ):
def count_of_possible_combinations_with_dp_array(
__SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] ) -> int:
if target < 0:
return 0
if target == 0:
return 1
if dp_array[target] != -1:
return dp_array[target]
lowercase_ : str = sum(
count_of_possible_combinations_with_dp_array(target - item , __SCREAMING_SNAKE_CASE )
for item in array )
lowercase_ : Tuple = answer
return answer
lowercase_ : Optional[Any] = [-1] * (target + 1)
return count_of_possible_combinations_with_dp_array(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int ):
lowercase_ : Dict = [0] * (target + 1)
lowercase_ : Dict = 1
for i in range(1 , target + 1 ):
for j in range(__SCREAMING_SNAKE_CASE ):
if i - array[j] >= 0:
dp_array[i] += dp_array[i - array[j]]
return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
__SCREAMING_SNAKE_CASE =3
__SCREAMING_SNAKE_CASE =5
__SCREAMING_SNAKE_CASE =[1, 2, 5]
print(combination_sum_iv(n, array, target))
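    # Hedged restatement of the bottom-up recurrence above (all three variants
    # share the obfuscated name `lowercase__`, so only the last definition
    # survives and the earlier ones cannot be called directly; the helper name
    # below is illustrative):
    def _count_combinations(target_: int, array_: list) -> int:
        dp = [0] * (target_ + 1)
        dp[0] = 1  # one (empty) way to form 0
        for i in range(1, target_ + 1):
            dp[i] = sum(dp[i - a] for a in array_ if i >= a)
        return dp[target_]

    assert _count_combinations(5, [1, 2, 5]) == 9  # matches the printed result above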
| 321
| 1
|
"""simple docstring"""
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def lowercase__( __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[Any]=None ):
lowercase_ : Optional[int] = None
if token is not None:
lowercase_ : List[Any] = {'Accept': 'application/vnd.github+json', 'Authorization': F'''Bearer {token}'''}
lowercase_ : List[str] = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'''
lowercase_ : Union[str, Any] = requests.get(__SCREAMING_SNAKE_CASE , headers=__SCREAMING_SNAKE_CASE ).json()
lowercase_ : Union[str, Any] = {}
try:
job_links.update({job['name']: job['html_url'] for job in result['jobs']} )
lowercase_ : str = math.ceil((result['total_count'] - 1_00) / 1_00 )
for i in range(__SCREAMING_SNAKE_CASE ):
lowercase_ : Optional[int] = requests.get(url + F'''&page={i + 2}''' , headers=__SCREAMING_SNAKE_CASE ).json()
job_links.update({job['name']: job['html_url'] for job in result['jobs']} )
return job_links
except Exception:
print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[int]=None ):
lowercase_ : Optional[int] = None
if token is not None:
lowercase_ : Tuple = {'Accept': 'application/vnd.github+json', 'Authorization': F'''Bearer {token}'''}
    lowercase_ : Union[str, Any] = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100'''
lowercase_ : Tuple = requests.get(__SCREAMING_SNAKE_CASE , headers=__SCREAMING_SNAKE_CASE ).json()
lowercase_ : Any = {}
try:
artifacts.update({artifact['name']: artifact['archive_download_url'] for artifact in result['artifacts']} )
lowercase_ : Optional[Any] = math.ceil((result['total_count'] - 1_00) / 1_00 )
for i in range(__SCREAMING_SNAKE_CASE ):
lowercase_ : List[str] = requests.get(url + F'''&page={i + 2}''' , headers=__SCREAMING_SNAKE_CASE ).json()
artifacts.update({artifact['name']: artifact['archive_download_url'] for artifact in result['artifacts']} )
return artifacts
except Exception:
print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Any ):
lowercase_ : str = None
if token is not None:
lowercase_ : str = {'Accept': 'application/vnd.github+json', 'Authorization': F'''Bearer {token}'''}
lowercase_ : Dict = requests.get(__SCREAMING_SNAKE_CASE , headers=__SCREAMING_SNAKE_CASE , allow_redirects=__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = result.headers['Location']
lowercase_ : str = requests.get(__SCREAMING_SNAKE_CASE , allow_redirects=__SCREAMING_SNAKE_CASE )
lowercase_ : Any = os.path.join(__SCREAMING_SNAKE_CASE , F'''{artifact_name}.zip''' )
with open(__SCREAMING_SNAKE_CASE , 'wb' ) as fp:
fp.write(response.content )
def lowercase__( __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Tuple=None ):
lowercase_ : Tuple = []
lowercase_ : int = []
lowercase_ : str = None
with zipfile.ZipFile(__SCREAMING_SNAKE_CASE ) as z:
for filename in z.namelist():
if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(__SCREAMING_SNAKE_CASE ) as f:
for line in f:
lowercase_ : Optional[Any] = line.decode('UTF-8' ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
lowercase_ : Any = line[: line.index(': ' )]
lowercase_ : str = line[line.index(': ' ) + len(': ' ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith('FAILED ' ):
# `test` is the test method that failed
lowercase_ : Tuple = line[len('FAILED ' ) :]
failed_tests.append(__SCREAMING_SNAKE_CASE )
elif filename == "job_name.txt":
lowercase_ : List[str] = line
if len(__SCREAMING_SNAKE_CASE ) != len(__SCREAMING_SNAKE_CASE ):
raise ValueError(
F'''`errors` and `failed_tests` should have the same number of elements. Got {len(__SCREAMING_SNAKE_CASE )} for `errors` '''
F'''and {len(__SCREAMING_SNAKE_CASE )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some'''
' problem.' )
lowercase_ : Any = None
if job_name and job_links:
lowercase_ : str = job_links.get(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# A list with elements of the form (line of error, error, failed test)
lowercase_ : Optional[Any] = [x + [y] + [job_link] for x, y in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )]
return result
def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple=None ):
lowercase_ : Union[str, Any] = []
lowercase_ : Dict = [os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for p in os.listdir(__SCREAMING_SNAKE_CASE ) if p.endswith('.zip' )]
for p in paths:
errors.extend(get_errors_from_single_artifact(__SCREAMING_SNAKE_CASE , job_links=__SCREAMING_SNAKE_CASE ) )
return errors
def lowercase__( __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : str=None ):
lowercase_ : str = Counter()
counter.update([x[1] for x in logs] )
lowercase_ : Any = counter.most_common()
lowercase_ : List[str] = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
lowercase_ : Union[str, Any] = {'count': count, 'failed_tests': [(x[2], x[0]) for x in logs if x[1] == error]}
lowercase_ : int = dict(sorted(r.items() , key=lambda __SCREAMING_SNAKE_CASE : item[1]["count"] , reverse=__SCREAMING_SNAKE_CASE ) )
return r
def lowercase__( __SCREAMING_SNAKE_CASE : List[str] ):
lowercase_ : Dict = test.split('::' )[0]
if test.startswith('tests/models/' ):
lowercase_ : str = test.split('/' )[2]
else:
lowercase_ : List[str] = None
return test
def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : int=None ):
lowercase_ : Optional[Any] = [(x[0], x[1], get_model(x[2] )) for x in logs]
lowercase_ : Union[str, Any] = [x for x in logs if x[2] is not None]
lowercase_ : List[Any] = {x[2] for x in logs}
lowercase_ : List[str] = {}
for test in tests:
lowercase_ : Any = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
lowercase_ : Optional[Any] = counter.most_common()
lowercase_ : Tuple = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
lowercase_ : Any = sum(error_counts.values() )
if n_errors > 0:
lowercase_ : str = {'count': n_errors, 'errors': error_counts}
lowercase_ : Optional[Any] = dict(sorted(r.items() , key=lambda __SCREAMING_SNAKE_CASE : item[1]["count"] , reverse=__SCREAMING_SNAKE_CASE ) )
return r
def lowercase__( __SCREAMING_SNAKE_CASE : List[Any] ):
lowercase_ : List[Any] = '| no. | error | status |'
lowercase_ : int = '|-:|:-|:-|'
lowercase_ : List[str] = [header, sep]
for error in reduced_by_error:
lowercase_ : List[Any] = reduced_by_error[error]['count']
lowercase_ : int = F'''| {count} | {error[:1_00]} | |'''
lines.append(__SCREAMING_SNAKE_CASE )
return "\n".join(__SCREAMING_SNAKE_CASE )
def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] ):
lowercase_ : str = '| model | no. of errors | major error | count |'
lowercase_ : Optional[int] = '|-:|-:|-:|-:|'
lowercase_ : Optional[Any] = [header, sep]
for model in reduced_by_model:
lowercase_ : Tuple = reduced_by_model[model]['count']
lowercase_ , lowercase_ : Tuple = list(reduced_by_model[model]['errors'].items() )[0]
lowercase_ : Optional[Any] = F'''| {model} | {count} | {error[:60]} | {_count} |'''
lines.append(__SCREAMING_SNAKE_CASE )
return "\n".join(__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Where to store the downloaded artifacts and other result files.",
)
parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
__SCREAMING_SNAKE_CASE =parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
__SCREAMING_SNAKE_CASE =get_job_links(args.workflow_run_id, token=args.token)
__SCREAMING_SNAKE_CASE ={}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
__SCREAMING_SNAKE_CASE =k.find(" / ")
__SCREAMING_SNAKE_CASE =k[index + len(" / ") :]
__SCREAMING_SNAKE_CASE =v
with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
__SCREAMING_SNAKE_CASE =get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
__SCREAMING_SNAKE_CASE =get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
__SCREAMING_SNAKE_CASE =Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
__SCREAMING_SNAKE_CASE =counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
__SCREAMING_SNAKE_CASE =reduce_by_error(errors)
__SCREAMING_SNAKE_CASE =reduce_by_model(errors)
__SCREAMING_SNAKE_CASE =make_github_table(reduced_by_error)
__SCREAMING_SNAKE_CASE =make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
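    # Hedged offline sketch of the grouping step above: reduce_by_error is, at its
    # core, a Counter over the error string (index 1) of each
    # (error line, error, job link) triple. Synthetic data, illustrative names:
    _sample_logs = [
        ["test_a.py:10: x", "AssertionError", "https://example.com/job/1"],
        ["test_b.py:20: x", "AssertionError", "https://example.com/job/2"],
        ["test_c.py:30: y", "ImportError", "https://example.com/job/3"],
    ]
    print(Counter(e[1] for e in _sample_logs).most_common())
    # -> [('AssertionError', 2), ('ImportError', 1)]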
| 321
|
"""simple docstring"""
class UpperCamelCase :
def __init__( self ,__UpperCamelCase ) -> None:
'''simple docstring'''
lowercase_ : int = set_counts
lowercase_ : List[Any] = max(__UpperCamelCase )
lowercase_ : Union[str, Any] = len(__UpperCamelCase )
lowercase_ : Dict = [1] * num_sets
lowercase_ : Optional[int] = list(range(__UpperCamelCase ) )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> bool:
'''simple docstring'''
lowercase_ : Optional[int] = self.get_parent(__UpperCamelCase )
lowercase_ : int = self.get_parent(__UpperCamelCase )
if src_parent == dst_parent:
return False
if self.ranks[dst_parent] >= self.ranks[src_parent]:
self.set_counts[dst_parent] += self.set_counts[src_parent]
lowercase_ : Tuple = 0
lowercase_ : str = dst_parent
if self.ranks[dst_parent] == self.ranks[src_parent]:
self.ranks[dst_parent] += 1
lowercase_ : Union[str, Any] = self.set_counts[dst_parent]
else:
self.set_counts[src_parent] += self.set_counts[dst_parent]
lowercase_ : str = 0
lowercase_ : Tuple = src_parent
lowercase_ : int = self.set_counts[src_parent]
lowercase_ : str = max(self.max_set ,__UpperCamelCase )
return True
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> int:
'''simple docstring'''
if self.parents[disj_set] == disj_set:
return disj_set
lowercase_ : Union[str, Any] = self.get_parent(self.parents[disj_set] )
return self.parents[disj_set]
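# Hedged, self-contained mini version of the structure above, with union by size
# standing in for union by rank for brevity; the helper names are illustrative,
# not the (obfuscated) class API:
def _find(parent, x):
    while parent[x] != x:
        parent[x] = parent[parent[x]]  # path halving
        x = parent[x]
    return x

def _union(parent, size, a, b):
    ra, rb = _find(parent, a), _find(parent, b)
    if ra == rb:
        return False
    if size[ra] < size[rb]:
        ra, rb = rb, ra
    parent[rb] = ra          # attach the smaller tree under the larger
    size[ra] += size[rb]
    return True

# parent, size = list(range(3)), [1, 1, 1]
# _union(parent, size, 0, 1)   # -> True; size[_find(parent, 0)] == 2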
| 321
| 1
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCamelCase ( unittest.TestCase ):
def __init__( self ,__UpperCamelCase ,__UpperCamelCase=13 ,__UpperCamelCase=3 ,__UpperCamelCase=224 ,__UpperCamelCase=30 ,__UpperCamelCase=400 ,__UpperCamelCase=True ,__UpperCamelCase=None ,__UpperCamelCase=True ,__UpperCamelCase=[0.5, 0.5, 0.5] ,__UpperCamelCase=[0.5, 0.5, 0.5] ,) -> int:
'''simple docstring'''
lowercase_ : List[str] = size if size is not None else {'height': 18, 'width': 18}
lowercase_ : Optional[int] = parent
lowercase_ : Dict = batch_size
lowercase_ : Any = num_channels
lowercase_ : Optional[int] = image_size
lowercase_ : List[str] = min_resolution
lowercase_ : Dict = max_resolution
lowercase_ : Tuple = do_resize
lowercase_ : List[str] = size
lowercase_ : int = do_normalize
lowercase_ : List[str] = image_mean
lowercase_ : Any = image_std
def _UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class UpperCamelCase ( lowercase_ , unittest.TestCase ):
lowercase = ViTImageProcessor if is_vision_available() else None
def _UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : int = EfficientFormerImageProcessorTester(self )
@property
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
return self.image_proc_tester.prepare_image_processor_dict()
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
lowercase_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__UpperCamelCase ,'image_mean' ) )
self.assertTrue(hasattr(__UpperCamelCase ,'image_std' ) )
self.assertTrue(hasattr(__UpperCamelCase ,'do_normalize' ) )
self.assertTrue(hasattr(__UpperCamelCase ,'do_resize' ) )
self.assertTrue(hasattr(__UpperCamelCase ,'size' ) )
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
pass
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ : int = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase_ : Any = prepare_image_inputs(self.image_proc_tester ,equal_resolution=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase ,Image.Image )
# Test not batched input
lowercase_ : Union[str, Any] = image_processor(image_inputs[0] ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
) ,)
# Test batched
lowercase_ : Dict = image_processor(__UpperCamelCase ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
) ,)
def _UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
lowercase_ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase_ : Any = prepare_image_inputs(self.image_proc_tester ,equal_resolution=__UpperCamelCase ,numpify=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase ,np.ndarray )
# Test not batched input
lowercase_ : Any = image_processor(image_inputs[0] ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
) ,)
# Test batched
lowercase_ : List[str] = image_processor(__UpperCamelCase ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
) ,)
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
lowercase_ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase_ : Optional[Any] = prepare_image_inputs(self.image_proc_tester ,equal_resolution=__UpperCamelCase ,torchify=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase ,torch.Tensor )
# Test not batched input
lowercase_ : Tuple = image_processor(image_inputs[0] ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
) ,)
# Test batched
lowercase_ : Any = image_processor(__UpperCamelCase ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
) ,)
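# Hedged sketch of the shape contract the tests above check: an image processor
# maps a list of (H, W, C) images to a (batch, C, out_h, out_w) float tensor.
# Nearest-neighbour indexing stands in for the real resize/normalize pipeline;
# all names below are illustrative.
def _fake_pixel_values(images, out_h=18, out_w=18):
    batch = []
    for img in images:  # img: (H, W, C) uint8 array
        ys = np.linspace(0, img.shape[0] - 1, out_h).astype(int)
        xs = np.linspace(0, img.shape[1] - 1, out_w).astype(int)
        resized = img[ys][:, xs]                  # nearest-neighbour "resize"
        batch.append(resized.transpose(2, 0, 1))  # HWC -> CHW
    return np.stack(batch).astype(np.float32) / 255.0

# _fake_pixel_values([np.zeros((30, 40, 3), dtype=np.uint8)]).shape  # (1, 3, 18, 18)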
| 321
|
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE ={
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
__SCREAMING_SNAKE_CASE ={
"vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
"merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
"tokenizer_config_file": {
"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
},
}
__SCREAMING_SNAKE_CASE ={"facebook/blenderbot-3B": 128}
class UpperCamelCase ( lowercase_ ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = ['input_ids', 'attention_mask']
lowercase = BlenderbotTokenizer
def __init__( self ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase="replace" ,__UpperCamelCase="<s>" ,__UpperCamelCase="</s>" ,__UpperCamelCase="</s>" ,__UpperCamelCase="<s>" ,__UpperCamelCase="<unk>" ,__UpperCamelCase="<pad>" ,__UpperCamelCase="<mask>" ,__UpperCamelCase=False ,__UpperCamelCase=True ,**__UpperCamelCase ,) -> Optional[int]:
'''simple docstring'''
super().__init__(
__UpperCamelCase ,__UpperCamelCase ,tokenizer_file=__UpperCamelCase ,errors=__UpperCamelCase ,bos_token=__UpperCamelCase ,eos_token=__UpperCamelCase ,sep_token=__UpperCamelCase ,cls_token=__UpperCamelCase ,unk_token=__UpperCamelCase ,pad_token=__UpperCamelCase ,mask_token=__UpperCamelCase ,add_prefix_space=__UpperCamelCase ,trim_offsets=__UpperCamelCase ,**__UpperCamelCase ,)
lowercase_ : Optional[int] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' ,__UpperCamelCase ) != add_prefix_space:
lowercase_ : Union[str, Any] = getattr(__UpperCamelCase ,pre_tok_state.pop('type' ) )
lowercase_ : Any = add_prefix_space
lowercase_ : Tuple = pre_tok_class(**__UpperCamelCase )
lowercase_ : int = add_prefix_space
lowercase_ : Any = 'post_processor'
lowercase_ : Optional[Any] = getattr(self.backend_tokenizer ,__UpperCamelCase ,__UpperCamelCase )
if tokenizer_component_instance:
lowercase_ : Tuple = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
lowercase_ : str = tuple(state['sep'] )
if "cls" in state:
lowercase_ : Union[str, Any] = tuple(state['cls'] )
lowercase_ : str = False
if state.get('add_prefix_space' ,__UpperCamelCase ) != add_prefix_space:
lowercase_ : Dict = add_prefix_space
lowercase_ : int = True
if state.get('trim_offsets' ,__UpperCamelCase ) != trim_offsets:
lowercase_ : Optional[Any] = trim_offsets
lowercase_ : Tuple = True
if changes_to_apply:
lowercase_ : Union[str, Any] = getattr(__UpperCamelCase ,state.pop('type' ) )
lowercase_ : Union[str, Any] = component_class(**__UpperCamelCase )
setattr(self.backend_tokenizer ,__UpperCamelCase ,__UpperCamelCase )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Tuple:
'''simple docstring'''
lowercase_ : Any = AddedToken(__UpperCamelCase ,lstrip=__UpperCamelCase ,rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase ,__UpperCamelCase ) else value
lowercase_ : str = value
def _UpperCAmelCase ( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> BatchEncoding:
'''simple docstring'''
lowercase_ : Optional[int] = kwargs.get('is_split_into_words' ,__UpperCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__UpperCamelCase ,**__UpperCamelCase )
def _UpperCAmelCase ( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> BatchEncoding:
'''simple docstring'''
lowercase_ : List[str] = kwargs.get('is_split_into_words' ,__UpperCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__UpperCamelCase ,**__UpperCamelCase )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> Tuple[str]:
'''simple docstring'''
lowercase_ : Any = self._tokenizer.model.save(__UpperCamelCase ,name=__UpperCamelCase )
return tuple(__UpperCamelCase )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> List[int]:
'''simple docstring'''
lowercase_ : int = [self.sep_token_id]
lowercase_ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> Any:
'''simple docstring'''
return token_ids_a + [self.eos_token_id]
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[int]:
'''simple docstring'''
lowercase_ : Optional[Any] = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(' ' + text )
else:
# Generated responses should contain them already.
inputs.append(__UpperCamelCase )
lowercase_ : Dict = ' '.join(__UpperCamelCase )
lowercase_ : str = self.encode(__UpperCamelCase )
if len(__UpperCamelCase ) > self.model_max_length:
lowercase_ : List[str] = input_ids[-self.model_max_length :]
logger.warning(f'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
return input_ids
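# Hedged usage sketch for the conversation encoding above (requires the
# `facebook/blenderbot-3B` checkpoint from the Hub; `BlenderbotTokenizerFast`
# is the real class name in `transformers`, everything else is illustrative):
#
#   from transformers import BlenderbotTokenizerFast
#   tok = BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B")
#   ids = tok(" Hello, how are you?").input_ids  # note the user-turn space prefix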
| 321
| 1
|
"""simple docstring"""
def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[Any] ):
global f # a global dp table for knapsack
if f[i][j] < 0:
if j < wt[i - 1]:
lowercase_ : List[str] = mf_knapsack(i - 1 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
else:
lowercase_ : Tuple = max(
mf_knapsack(i - 1 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , mf_knapsack(i - 1 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , j - wt[i - 1] ) + val[i - 1] , )
lowercase_ : int = val
return f[i][j]
def lowercase__( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int] ):
lowercase_ : str = [[0] * (w + 1) for _ in range(n + 1 )]
for i in range(1 , n + 1 ):
for w_ in range(1 , w + 1 ):
if wt[i - 1] <= w_:
lowercase_ : Tuple = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] , dp[i - 1][w_] )
else:
lowercase_ : int = dp[i - 1][w_]
    return dp[n][w], dp  # use the capacity w directly; w_ would leak from the loop (and be undefined for w == 0)
def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list , __SCREAMING_SNAKE_CASE : list ):
if not (isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ) and isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) )):
raise ValueError(
'Both the weights and values vectors must be either lists or tuples' )
lowercase_ : Union[str, Any] = len(__SCREAMING_SNAKE_CASE )
if num_items != len(__SCREAMING_SNAKE_CASE ):
lowercase_ : List[str] = (
'The number of weights must be the same as the number of values.\n'
F'''But got {num_items} weights and {len(__SCREAMING_SNAKE_CASE )} values'''
)
raise ValueError(__SCREAMING_SNAKE_CASE )
for i in range(__SCREAMING_SNAKE_CASE ):
if not isinstance(wt[i] , __SCREAMING_SNAKE_CASE ):
lowercase_ : Optional[int] = (
'All weights must be integers but got weight of '
F'''type {type(wt[i] )} at index {i}'''
)
raise TypeError(__SCREAMING_SNAKE_CASE )
lowercase_ , lowercase_ : Union[str, Any] = knapsack(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase_ : set = set()
_construct_solution(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return optimal_val, example_optional_set
def lowercase__( __SCREAMING_SNAKE_CASE : list , __SCREAMING_SNAKE_CASE : list , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : set ):
# for the current item i at a maximum weight j to be part of an optimal subset,
# the optimal value at (i, j) must be greater than the optimal value at (i-1, j).
# where i - 1 means considering only the previous items at the given maximum weight
if i > 0 and j > 0:
if dp[i - 1][j] == dp[i][j]:
_construct_solution(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , i - 1 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
else:
optimal_set.add(__SCREAMING_SNAKE_CASE )
_construct_solution(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , i - 1 , j - wt[i - 1] , __SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE =[3, 2, 4, 4]
__SCREAMING_SNAKE_CASE =[4, 3, 2, 3]
__SCREAMING_SNAKE_CASE =4
__SCREAMING_SNAKE_CASE =6
__SCREAMING_SNAKE_CASE =[[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE =knapsack(w, wt, val, n)
print(optimal_solution)
print(mf_knapsack(n, wt, val, w)) # switched the n and w
# testing the dynamic programming problem with example
# the optimal subset for the above example are items 3 and 4
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE =knapsack_with_example_solution(w, wt, val)
assert optimal_solution == 8
assert optimal_subset == {3, 4}
print("optimal_value = ", optimal_solution)
print("An optimal subset corresponding to the optimal value", optimal_subset)
| 321
|
"""simple docstring"""
import os
import sys
import unittest
__SCREAMING_SNAKE_CASE =os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
__SCREAMING_SNAKE_CASE =os.path.join("tests", "models", "bert", "test_modeling_bert.py")
__SCREAMING_SNAKE_CASE =os.path.join("tests", "models", "blip", "test_modeling_blip.py")
class UpperCamelCase ( unittest.TestCase ):
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
lowercase_ : Tuple = get_test_to_tester_mapping(__UpperCamelCase )
lowercase_ : Optional[int] = get_test_to_tester_mapping(__UpperCamelCase )
lowercase_ : List[str] = {'BertModelTest': 'BertModelTester'}
lowercase_ : Union[str, Any] = {
'BlipModelTest': 'BlipModelTester',
'BlipTextImageModelTest': 'BlipTextImageModelsModelTester',
'BlipTextModelTest': 'BlipTextModelTester',
'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester',
'BlipVQAModelTest': 'BlipVQAModelTester',
'BlipVisionModelTest': 'BlipVisionModelTester',
}
self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase )
self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase )
def _UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ : Optional[Any] = get_model_to_test_mapping(__UpperCamelCase )
lowercase_ : List[str] = get_model_to_test_mapping(__UpperCamelCase )
lowercase_ : Any = {
'BertForMaskedLM': ['BertModelTest'],
'BertForMultipleChoice': ['BertModelTest'],
'BertForNextSentencePrediction': ['BertModelTest'],
'BertForPreTraining': ['BertModelTest'],
'BertForQuestionAnswering': ['BertModelTest'],
'BertForSequenceClassification': ['BertModelTest'],
'BertForTokenClassification': ['BertModelTest'],
'BertLMHeadModel': ['BertModelTest'],
'BertModel': ['BertModelTest'],
}
lowercase_ : Any = {
'BlipForConditionalGeneration': ['BlipTextImageModelTest'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'],
'BlipForQuestionAnswering': ['BlipVQAModelTest'],
'BlipModel': ['BlipModelTest'],
'BlipTextModel': ['BlipTextModelTest'],
'BlipVisionModel': ['BlipVisionModelTest'],
}
self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase )
self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase )
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
lowercase_ : List[str] = get_model_to_tester_mapping(__UpperCamelCase )
lowercase_ : Dict = get_model_to_tester_mapping(__UpperCamelCase )
lowercase_ : Tuple = {
'BertForMaskedLM': ['BertModelTester'],
'BertForMultipleChoice': ['BertModelTester'],
'BertForNextSentencePrediction': ['BertModelTester'],
'BertForPreTraining': ['BertModelTester'],
'BertForQuestionAnswering': ['BertModelTester'],
'BertForSequenceClassification': ['BertModelTester'],
'BertForTokenClassification': ['BertModelTester'],
'BertLMHeadModel': ['BertModelTester'],
'BertModel': ['BertModelTester'],
}
lowercase_ : Optional[Any] = {
'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'],
'BlipForQuestionAnswering': ['BlipVQAModelTester'],
'BlipModel': ['BlipModelTester'],
'BlipTextModel': ['BlipTextModelTester'],
'BlipVisionModel': ['BlipVisionModelTester'],
}
self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase )
self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase )
| 321
| 1
|
"""simple docstring"""
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase ( lowercase_ ):
lowercase = (DDPMParallelScheduler,)
def _UpperCAmelCase ( self ,**__UpperCamelCase ) -> str:
'''simple docstring'''
lowercase_ : Optional[Any] = {
'num_train_timesteps': 1000,
'beta_start': 0.0001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'variance_type': 'fixed_small',
'clip_sample': True,
}
config.update(**__UpperCamelCase )
return config
def _UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=__UpperCamelCase )
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] ,[0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=__UpperCamelCase ,beta_end=__UpperCamelCase )
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__UpperCamelCase )
def _UpperCAmelCase ( self ) -> int:
'''simple docstring'''
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=__UpperCamelCase )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__UpperCamelCase )
def _UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
self.check_over_configs(thresholding=__UpperCamelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=__UpperCamelCase ,prediction_type=__UpperCamelCase ,sample_max_value=__UpperCamelCase ,)
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=__UpperCamelCase )
def _UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
for t in [0, 500, 999]:
self.check_over_forward(time_step=__UpperCamelCase )
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
lowercase_ : Any = self.scheduler_classes[0]
lowercase_ : Optional[int] = self.get_scheduler_config()
lowercase_ : int = scheduler_class(**__UpperCamelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1e-5
def _UpperCAmelCase ( self ) -> int:
'''simple docstring'''
lowercase_ : str = self.scheduler_classes[0]
lowercase_ : Tuple = self.get_scheduler_config()
lowercase_ : List[str] = scheduler_class(**__UpperCamelCase )
lowercase_ : Union[str, Any] = len(__UpperCamelCase )
lowercase_ : str = self.dummy_model()
lowercase_ : Any = self.dummy_sample_deter
lowercase_ : int = self.dummy_sample_deter + 0.1
lowercase_ : Any = self.dummy_sample_deter - 0.1
lowercase_ : List[Any] = samplea.shape[0]
lowercase_ : Optional[Any] = torch.stack([samplea, samplea, samplea] ,dim=0 )
lowercase_ : Any = torch.arange(__UpperCamelCase )[0:3, None].repeat(1 ,__UpperCamelCase )
lowercase_ : Tuple = model(samples.flatten(0 ,1 ) ,timesteps.flatten(0 ,1 ) )
lowercase_ : Any = scheduler.batch_step_no_noise(__UpperCamelCase ,timesteps.flatten(0 ,1 ) ,samples.flatten(0 ,1 ) )
lowercase_ : int = torch.sum(torch.abs(__UpperCamelCase ) )
lowercase_ : Any = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_sum.item() - 1153.1833 ) < 1e-2
assert abs(result_mean.item() - 0.5005 ) < 1e-3
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
lowercase_ : Tuple = self.scheduler_classes[0]
lowercase_ : Optional[int] = self.get_scheduler_config()
lowercase_ : Dict = scheduler_class(**__UpperCamelCase )
lowercase_ : Optional[Any] = len(__UpperCamelCase )
lowercase_ : List[Any] = self.dummy_model()
lowercase_ : str = self.dummy_sample_deter
lowercase_ : str = torch.manual_seed(0 )
for t in reversed(range(__UpperCamelCase ) ):
# 1. predict noise residual
lowercase_ : Any = model(__UpperCamelCase ,__UpperCamelCase )
# 2. predict previous mean of sample x_t-1
lowercase_ : int = scheduler.step(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,generator=__UpperCamelCase ).prev_sample
lowercase_ : Dict = pred_prev_sample
lowercase_ : List[Any] = torch.sum(torch.abs(__UpperCamelCase ) )
lowercase_ : List[str] = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_sum.item() - 258.9606 ) < 1e-2
assert abs(result_mean.item() - 0.3372 ) < 1e-3
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ : Optional[int] = self.scheduler_classes[0]
lowercase_ : Any = self.get_scheduler_config(prediction_type='v_prediction' )
lowercase_ : Union[str, Any] = scheduler_class(**__UpperCamelCase )
lowercase_ : List[Any] = len(__UpperCamelCase )
lowercase_ : int = self.dummy_model()
lowercase_ : str = self.dummy_sample_deter
lowercase_ : Dict = torch.manual_seed(0 )
for t in reversed(range(__UpperCamelCase ) ):
# 1. predict noise residual
lowercase_ : List[Any] = model(__UpperCamelCase ,__UpperCamelCase )
# 2. predict previous mean of sample x_t-1
lowercase_ : List[str] = scheduler.step(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,generator=__UpperCamelCase ).prev_sample
lowercase_ : Tuple = pred_prev_sample
lowercase_ : str = torch.sum(torch.abs(__UpperCamelCase ) )
lowercase_ : str = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_sum.item() - 202.0296 ) < 1e-2
assert abs(result_mean.item() - 0.2631 ) < 1e-3
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ : Dict = self.scheduler_classes[0]
lowercase_ : List[Any] = self.get_scheduler_config()
lowercase_ : Union[str, Any] = scheduler_class(**__UpperCamelCase )
lowercase_ : int = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=__UpperCamelCase )
lowercase_ : List[Any] = scheduler.timesteps
for i, timestep in enumerate(__UpperCamelCase ):
if i == len(__UpperCamelCase ) - 1:
lowercase_ : Any = -1
else:
lowercase_ : Tuple = timesteps[i + 1]
lowercase_ : List[str] = scheduler.previous_timestep(__UpperCamelCase )
lowercase_ : Tuple = prev_t.item()
self.assertEqual(__UpperCamelCase ,__UpperCamelCase )
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
lowercase_ : Dict = self.scheduler_classes[0]
lowercase_ : Optional[Any] = self.get_scheduler_config()
lowercase_ : Optional[Any] = scheduler_class(**__UpperCamelCase )
lowercase_ : Optional[int] = [100, 87, 50, 51, 0]
with self.assertRaises(__UpperCamelCase ,msg='`custom_timesteps` must be in descending order.' ):
scheduler.set_timesteps(timesteps=__UpperCamelCase )
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ : str = self.scheduler_classes[0]
lowercase_ : List[Any] = self.get_scheduler_config()
lowercase_ : int = scheduler_class(**__UpperCamelCase )
lowercase_ : List[str] = [100, 87, 50, 1, 0]
lowercase_ : List[str] = len(__UpperCamelCase )
with self.assertRaises(__UpperCamelCase ,msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ):
scheduler.set_timesteps(num_inference_steps=__UpperCamelCase ,timesteps=__UpperCamelCase )
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
lowercase_ : List[Any] = self.scheduler_classes[0]
lowercase_ : Optional[Any] = self.get_scheduler_config()
lowercase_ : Optional[int] = scheduler_class(**__UpperCamelCase )
lowercase_ : List[Any] = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            __UpperCamelCase ,msg=f'`timesteps` must start before `self.config.num_train_timesteps`: {scheduler.config.num_train_timesteps}' ,):
scheduler.set_timesteps(timesteps=__UpperCamelCase )
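# Hedged, self-contained restatement of the custom-timesteps validation exercised
# by the last three tests above (strictly descending order; first step below
# num_train_timesteps). Illustrative, not the scheduler's actual implementation:
def _validate_custom_timesteps(timesteps, num_train_timesteps=1000):
    if any(timesteps[i] <= timesteps[i + 1] for i in range(len(timesteps) - 1)):
        raise ValueError("`custom_timesteps` must be in descending order.")
    if timesteps and timesteps[0] >= num_train_timesteps:
        raise ValueError("`timesteps` must start before `num_train_timesteps`.")

_validate_custom_timesteps([100, 87, 50, 1, 0])  # passes silently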
| 321
|
"""simple docstring"""
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def lowercase__( *__SCREAMING_SNAKE_CASE : Tuple ):
with open(__SCREAMING_SNAKE_CASE , 'r' ) as fh:
fcntl.flock(__SCREAMING_SNAKE_CASE , fcntl.LOCK_EX )
try:
print(*__SCREAMING_SNAKE_CASE )
finally:
fcntl.flock(__SCREAMING_SNAKE_CASE , fcntl.LOCK_UN )
__SCREAMING_SNAKE_CASE =int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
__SCREAMING_SNAKE_CASE =torch.device("cuda", local_rank)
__SCREAMING_SNAKE_CASE =socket.gethostname()
__SCREAMING_SNAKE_CASE =F"[{hostname}-{local_rank}]"
try:
# test distributed
dist.init_process_group("nccl")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
__SCREAMING_SNAKE_CASE =dist.get_rank()
__SCREAMING_SNAKE_CASE =dist.get_world_size()
printflock(F"{gpu} is OK (global rank: {rank}/{world_size})")
dist.barrier()
if rank == 0:
printflock(F"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}")
except Exception:
printflock(F"{gpu} is broken")
raise
| 321
| 1
|
"""simple docstring"""
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class UpperCamelCase ( lowercase_ ):
lowercase = 'M-CLIP'
def __init__( self ,__UpperCamelCase=1024 ,__UpperCamelCase=768 ,**__UpperCamelCase ) -> Dict:
'''simple docstring'''
lowercase_ : List[str] = transformerDimSize
lowercase_ : List[Any] = imageDimSize
super().__init__(**__UpperCamelCase )
class UpperCamelCase ( lowercase_ ):
lowercase = MCLIPConfig
def __init__( self ,__UpperCamelCase ,*__UpperCamelCase ,**__UpperCamelCase ) -> List[Any]:
'''simple docstring'''
super().__init__(__UpperCamelCase ,*__UpperCamelCase ,**__UpperCamelCase )
lowercase_ : str = XLMRobertaModel(__UpperCamelCase )
lowercase_ : Tuple = torch.nn.Linear(
in_features=config.transformerDimensions ,out_features=config.numDims )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
lowercase_ : List[str] = self.transformer(input_ids=__UpperCamelCase ,attention_mask=__UpperCamelCase )[0]
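        # Mean-pool the token embeddings, zeroing padded positions via the attention
        # mask before averaging (a standard sentence-embedding pooling scheme).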
lowercase_ : str = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None]
return self.LinearTransformation(__UpperCamelCase ), embs
| 321
|
"""simple docstring"""
class UpperCamelCase :
def __init__( self ,__UpperCamelCase ,__UpperCamelCase ) -> int:
'''simple docstring'''
lowercase_ : List[Any] = name
lowercase_ : int = val
def __str__( self ) -> Tuple:
'''simple docstring'''
return f'''{self.__class__.__name__}({self.name}, {self.val})'''
def __lt__( self ,__UpperCamelCase ) -> Tuple:
'''simple docstring'''
return self.val < other.val
class UpperCamelCase :
def __init__( self ,__UpperCamelCase ) -> Dict:
'''simple docstring'''
lowercase_ : Optional[int] = {}
lowercase_ : Tuple = {}
lowercase_ : Union[str, Any] = self.build_heap(__UpperCamelCase )
def __getitem__( self ,__UpperCamelCase ) -> int:
'''simple docstring'''
return self.get_value(__UpperCamelCase )
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]:
'''simple docstring'''
return (idx - 1) // 2
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[Any]:
'''simple docstring'''
return idx * 2 + 1
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Tuple:
'''simple docstring'''
return idx * 2 + 2
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[Any]:
'''simple docstring'''
return self.heap_dict[key]
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]:
'''simple docstring'''
lowercase_ : Optional[int] = len(__UpperCamelCase ) - 1
lowercase_ : Optional[int] = self.get_parent_idx(__UpperCamelCase )
for idx, i in enumerate(__UpperCamelCase ):
lowercase_ : Any = idx
lowercase_ : str = i.val
for i in range(__UpperCamelCase ,-1 ,-1 ):
self.sift_down(__UpperCamelCase ,__UpperCamelCase )
return array
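    # Note: sifting down from the last parent builds the heap bottom-up in O(n),
    # which is cheaper than n successive inserts at O(n log n).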
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> Tuple:
'''simple docstring'''
while True:
lowercase_ : List[str] = self.get_left_child_idx(__UpperCamelCase ) # noqa: E741
lowercase_ : List[str] = self.get_right_child_idx(__UpperCamelCase )
lowercase_ : List[str] = idx
if l < len(__UpperCamelCase ) and array[l] < array[idx]:
lowercase_ : List[str] = l
if r < len(__UpperCamelCase ) and array[r] < array[smallest]:
lowercase_ : Dict = r
if smallest != idx:
lowercase_ , lowercase_ : Union[str, Any] = array[smallest], array[idx]
                lowercase_ , lowercase_ : str = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
lowercase_ : Any = smallest
else:
break
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> int:
'''simple docstring'''
lowercase_ : Dict = self.get_parent_idx(__UpperCamelCase )
while p >= 0 and self.heap[p] > self.heap[idx]:
lowercase_ , lowercase_ : Any = self.heap[idx], self.heap[p]
lowercase_ , lowercase_ : Tuple = (
self.idx_of_element[self.heap[idx]],
self.idx_of_element[self.heap[p]],
)
lowercase_ : int = p
lowercase_ : str = self.get_parent_idx(__UpperCamelCase )
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
return self.heap[0]
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ , lowercase_ : Optional[Any] = self.heap[-1], self.heap[0]
lowercase_ , lowercase_ : Tuple = (
self.idx_of_element[self.heap[-1]],
self.idx_of_element[self.heap[0]],
)
lowercase_ : Tuple = self.heap.pop()
del self.idx_of_element[x]
self.sift_down(0 ,self.heap )
return x
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Dict:
'''simple docstring'''
self.heap.append(__UpperCamelCase )
lowercase_ : Tuple = len(self.heap ) - 1
lowercase_ : Optional[int] = node.val
self.sift_up(len(self.heap ) - 1 )
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
return len(self.heap ) == 0
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> List[Any]:
'''simple docstring'''
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less than the current value"
lowercase_ : Any = new_value
lowercase_ : List[str] = new_value
self.sift_up(self.idx_of_element[node] )
__SCREAMING_SNAKE_CASE =Node("R", -1)
__SCREAMING_SNAKE_CASE =Node("B", 6)
__SCREAMING_SNAKE_CASE =Node("A", 3)
__SCREAMING_SNAKE_CASE =Node("X", 1)
__SCREAMING_SNAKE_CASE =Node("E", 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
__SCREAMING_SNAKE_CASE =MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print("Min Heap - before decrease key")
for i in my_min_heap.heap:
print(i)
print("Min Heap - After decrease key of node [B -> -17]")
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 321
| 1
|
"""simple docstring"""
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
__SCREAMING_SNAKE_CASE =False
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE ="ybelkada/fonts"
def lowercase__( ):
if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
raise ImportError(
F'''You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use '''
'Pix2StructImageProcessor. Please upgrade torch.' )
def lowercase__( __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple ):
requires_backends(__SCREAMING_SNAKE_CASE , ['torch'] )
_check_torch_version()
lowercase_ : List[Any] = image_tensor.unsqueeze(0 )
lowercase_ : List[str] = torch.nn.functional.unfold(__SCREAMING_SNAKE_CASE , (patch_height, patch_width) , stride=(patch_height, patch_width) )
lowercase_ : List[Any] = patches.reshape(image_tensor.size(0 ) , image_tensor.size(1 ) , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , -1 )
lowercase_ : Union[str, Any] = patches.permute(0 , 4 , 2 , 3 , 1 ).reshape(
image_tensor.size(2 ) // patch_height , image_tensor.size(3 ) // patch_width , image_tensor.size(1 ) * patch_height * patch_width , )
return patches.unsqueeze(0 )
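# A quick shape sketch (hypothetical 3x64x64 input with 16x16 patches): the helper
# above returns a tensor of shape [1, 4, 4, 768], i.e.
# [1, rows, columns, channels * patch_height * patch_width].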
def lowercase__( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int = 36 , __SCREAMING_SNAKE_CASE : str = "black" , __SCREAMING_SNAKE_CASE : str = "white" , __SCREAMING_SNAKE_CASE : int = 5 , __SCREAMING_SNAKE_CASE : int = 5 , __SCREAMING_SNAKE_CASE : int = 5 , __SCREAMING_SNAKE_CASE : int = 5 , __SCREAMING_SNAKE_CASE : Optional[bytes] = None , __SCREAMING_SNAKE_CASE : Optional[str] = None , ):
requires_backends(__SCREAMING_SNAKE_CASE , 'vision' )
# Add new lines so that each line is no more than 80 characters.
lowercase_ : List[Any] = textwrap.TextWrapper(width=80 )
lowercase_ : List[str] = wrapper.wrap(text=__SCREAMING_SNAKE_CASE )
lowercase_ : Tuple = '\n'.join(__SCREAMING_SNAKE_CASE )
if font_bytes is not None and font_path is None:
lowercase_ : List[Any] = io.BytesIO(__SCREAMING_SNAKE_CASE )
elif font_path is not None:
lowercase_ : Dict = font_path
else:
lowercase_ : Any = hf_hub_download(__SCREAMING_SNAKE_CASE , 'Arial.TTF' )
lowercase_ : Optional[Any] = ImageFont.truetype(__SCREAMING_SNAKE_CASE , encoding='UTF-8' , size=__SCREAMING_SNAKE_CASE )
# Use a temporary canvas to determine the width and height in pixels when
# rendering the text.
lowercase_ : int = ImageDraw.Draw(Image.new('RGB' , (1, 1) , __SCREAMING_SNAKE_CASE ) )
lowercase_ , lowercase_ , lowercase_ , lowercase_ : Dict = temp_draw.textbbox((0, 0) , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Create the actual image with a bit of padding around the text.
lowercase_ : List[Any] = text_width + left_padding + right_padding
lowercase_ : List[str] = text_height + top_padding + bottom_padding
lowercase_ : Optional[int] = Image.new('RGB' , (image_width, image_height) , __SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = ImageDraw.Draw(__SCREAMING_SNAKE_CASE )
draw.text(xy=(left_padding, top_padding) , text=__SCREAMING_SNAKE_CASE , fill=__SCREAMING_SNAKE_CASE , font=__SCREAMING_SNAKE_CASE )
return image
def lowercase__( __SCREAMING_SNAKE_CASE : np.ndarray , __SCREAMING_SNAKE_CASE : str , **__SCREAMING_SNAKE_CASE : Optional[Any] ):
requires_backends(__SCREAMING_SNAKE_CASE , 'vision' )
# Convert to PIL image if necessary
lowercase_ : Optional[Any] = to_pil_image(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = render_text(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
lowercase_ : List[Any] = max(header_image.width , image.width )
lowercase_ : Union[str, Any] = int(image.height * (new_width / image.width) )
lowercase_ : List[str] = int(header_image.height * (new_width / header_image.width) )
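    # Rescale both images to the shared width, preserving each aspect ratio, so the
    # rendered header sits flush above the original image.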
lowercase_ : Tuple = Image.new('RGB' , (new_width, new_height + new_header_height) , 'white' )
new_image.paste(header_image.resize((new_width, new_header_height) ) , (0, 0) )
new_image.paste(image.resize((new_width, new_height) ) , (0, new_header_height) )
# Convert back to the original framework if necessary
lowercase_ : Tuple = to_numpy_array(__SCREAMING_SNAKE_CASE )
if infer_channel_dimension_format(__SCREAMING_SNAKE_CASE ) == ChannelDimension.LAST:
lowercase_ : Optional[int] = to_channel_dimension_format(__SCREAMING_SNAKE_CASE , ChannelDimension.LAST )
return new_image
class UpperCamelCase ( lowercase_ ):
lowercase = ['flattened_patches']
def __init__( self ,__UpperCamelCase = True ,__UpperCamelCase = True ,__UpperCamelCase = None ,__UpperCamelCase = 2048 ,__UpperCamelCase = False ,**__UpperCamelCase ,) -> None:
'''simple docstring'''
super().__init__(**__UpperCamelCase )
lowercase_ : Optional[int] = patch_size if patch_size is not None else {'height': 16, 'width': 16}
lowercase_ : Optional[int] = do_normalize
lowercase_ : List[Any] = do_convert_rgb
lowercase_ : str = max_patches
lowercase_ : str = is_vqa
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ) -> np.ndarray:
'''simple docstring'''
requires_backends(self.extract_flattened_patches ,'torch' )
_check_torch_version()
# convert to torch
lowercase_ : Tuple = to_channel_dimension_format(__UpperCamelCase ,ChannelDimension.FIRST )
lowercase_ : List[Any] = torch.from_numpy(__UpperCamelCase )
lowercase_ , lowercase_ : int = patch_size['height'], patch_size['width']
lowercase_ , lowercase_ : Optional[Any] = get_image_size(__UpperCamelCase )
        # maximize the rescale factor such that the resulting number of patches (rows * columns) stays within max_patches
lowercase_ : List[str] = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) )
lowercase_ : str = max(min(math.floor(scale * image_height / patch_height ) ,__UpperCamelCase ) ,1 )
lowercase_ : List[str] = max(min(math.floor(scale * image_width / patch_width ) ,__UpperCamelCase ) ,1 )
lowercase_ : int = max(num_feasible_rows * patch_height ,1 )
lowercase_ : str = max(num_feasible_cols * patch_width ,1 )
lowercase_ : Union[str, Any] = torch.nn.functional.interpolate(
image.unsqueeze(0 ) ,size=(resized_height, resized_width) ,mode='bilinear' ,align_corners=__UpperCamelCase ,antialias=__UpperCamelCase ,).squeeze(0 )
# [1, rows, columns, patch_height * patch_width * image_channels]
lowercase_ : Optional[Any] = torch_extract_patches(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
lowercase_ : Tuple = patches.shape
lowercase_ : Tuple = patches_shape[1]
lowercase_ : List[str] = patches_shape[2]
lowercase_ : Tuple = patches_shape[3]
# [rows * columns, patch_height * patch_width * image_channels]
lowercase_ : Optional[int] = patches.reshape([rows * columns, depth] )
# [rows * columns, 1]
lowercase_ : Union[str, Any] = torch.arange(__UpperCamelCase ).reshape([rows, 1] ).repeat(1 ,__UpperCamelCase ).reshape([rows * columns, 1] )
lowercase_ : Optional[int] = torch.arange(__UpperCamelCase ).reshape([1, columns] ).repeat(__UpperCamelCase ,1 ).reshape([rows * columns, 1] )
# Offset by 1 so the ids do not contain zeros, which represent padding.
row_ids += 1
col_ids += 1
# Prepare additional patch features.
# [rows * columns, 1]
lowercase_ : Any = row_ids.to(torch.floataa )
lowercase_ : Optional[int] = col_ids.to(torch.floataa )
# [rows * columns, 2 + patch_height * patch_width * image_channels]
lowercase_ : List[str] = torch.cat([row_ids, col_ids, patches] ,-1 )
# [max_patches, 2 + patch_height * patch_width * image_channels]
lowercase_ : Any = torch.nn.functional.pad(__UpperCamelCase ,[0, 0, 0, max_patches - (rows * columns)] ).float()
lowercase_ : List[str] = to_numpy_array(__UpperCamelCase )
return result
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ,**__UpperCamelCase ) -> np.ndarray:
'''simple docstring'''
if image.dtype == np.uinta:
lowercase_ : Optional[int] = image.astype(np.floataa )
# take mean across the whole `image`
lowercase_ : Union[str, Any] = np.mean(__UpperCamelCase )
lowercase_ : Optional[int] = np.std(__UpperCamelCase )
lowercase_ : List[Any] = max(__UpperCamelCase ,1.0 / math.sqrt(np.prod(image.shape ) ) )
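        # Flooring the std at 1 / sqrt(num_pixels) keeps flat (zero-variance) images
        # from being divided by ~0 during normalization.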
return normalize(__UpperCamelCase ,mean=__UpperCamelCase ,std=__UpperCamelCase ,**__UpperCamelCase )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = ChannelDimension.FIRST ,**__UpperCamelCase ,) -> ImageInput:
'''simple docstring'''
lowercase_ : Any = do_normalize if do_normalize is not None else self.do_normalize
lowercase_ : List[str] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
lowercase_ : Optional[Any] = patch_size if patch_size is not None else self.patch_size
lowercase_ : Union[str, Any] = max_patches if max_patches is not None else self.max_patches
lowercase_ : Dict = self.is_vqa
if kwargs.get('data_format' ,__UpperCamelCase ) is not None:
            raise ValueError('data_format is not an accepted input, as the output format of the flattened patches is fixed.' )
lowercase_ : List[Any] = make_list_of_images(__UpperCamelCase )
if not valid_images(__UpperCamelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
lowercase_ : Dict = [convert_to_rgb(__UpperCamelCase ) for image in images]
# All transformations expect numpy arrays.
lowercase_ : Union[str, Any] = [to_numpy_array(__UpperCamelCase ) for image in images]
if is_vqa:
if header_text is None:
raise ValueError('A header text must be provided for VQA models.' )
lowercase_ : Optional[int] = kwargs.pop('font_bytes' ,__UpperCamelCase )
lowercase_ : Union[str, Any] = kwargs.pop('font_path' ,__UpperCamelCase )
if isinstance(__UpperCamelCase ,__UpperCamelCase ):
lowercase_ : Optional[int] = [header_text] * len(__UpperCamelCase )
lowercase_ : Tuple = [
render_header(__UpperCamelCase ,header_text[i] ,font_bytes=__UpperCamelCase ,font_path=__UpperCamelCase )
for i, image in enumerate(__UpperCamelCase )
]
if do_normalize:
lowercase_ : Tuple = [self.normalize(image=__UpperCamelCase ) for image in images]
# convert to torch tensor and permute
lowercase_ : Optional[int] = [
self.extract_flattened_patches(image=__UpperCamelCase ,max_patches=__UpperCamelCase ,patch_size=__UpperCamelCase )
for image in images
]
# create attention mask in numpy
lowercase_ : List[Any] = [(image.sum(axis=-1 ) != 0).astype(np.floataa ) for image in images]
lowercase_ : str = BatchFeature(
data={'flattened_patches': images, 'attention_mask': attention_masks} ,tensor_type=__UpperCamelCase )
return encoded_outputs
| 321
|
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class UpperCamelCase ( unittest.TestCase ):
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ : List[Any] = tempfile.mkdtemp()
# fmt: off
lowercase_ : Any = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
lowercase_ : int = dict(zip(__UpperCamelCase ,range(len(__UpperCamelCase ) ) ) )
lowercase_ : Union[str, Any] = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
lowercase_ : Tuple = {'unk_token': '<unk>'}
lowercase_ : str = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
lowercase_ : int = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp:
fp.write(json.dumps(__UpperCamelCase ) + '\n' )
with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp:
fp.write('\n'.join(__UpperCamelCase ) )
lowercase_ : Any = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.4814_5466, 0.457_8275, 0.4082_1073],
'image_std': [0.2686_2954, 0.2613_0258, 0.2757_7711],
}
lowercase_ : List[str] = os.path.join(self.tmpdirname ,__UpperCamelCase )
with open(self.image_processor_file ,'w' ,encoding='utf-8' ) as fp:
json.dump(__UpperCamelCase ,__UpperCamelCase )
def _UpperCAmelCase ( self ,**__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
return CLIPTokenizer.from_pretrained(self.tmpdirname ,**__UpperCamelCase )
def _UpperCAmelCase ( self ,**__UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
return CLIPTokenizerFast.from_pretrained(self.tmpdirname ,**__UpperCamelCase )
def _UpperCAmelCase ( self ,**__UpperCamelCase ) -> str:
'''simple docstring'''
return ViTImageProcessor.from_pretrained(self.tmpdirname ,**__UpperCamelCase )
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ : Dict = [np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uinta )]
lowercase_ : List[str] = [Image.fromarray(np.moveaxis(__UpperCamelCase ,0 ,-1 ) ) for x in image_inputs]
return image_inputs
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
lowercase_ : Optional[int] = self.get_tokenizer()
lowercase_ : List[Any] = self.get_rust_tokenizer()
lowercase_ : Tuple = self.get_image_processor()
lowercase_ : Optional[int] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase )
processor_slow.save_pretrained(self.tmpdirname )
lowercase_ : Union[str, Any] = CLIPSegProcessor.from_pretrained(self.tmpdirname ,use_fast=__UpperCamelCase )
lowercase_ : List[Any] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase )
processor_fast.save_pretrained(self.tmpdirname )
lowercase_ : str = CLIPSegProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() ,tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() ,tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() ,tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer ,__UpperCamelCase )
self.assertIsInstance(processor_fast.tokenizer ,__UpperCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() ,image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() ,image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor ,__UpperCamelCase )
self.assertIsInstance(processor_fast.image_processor ,__UpperCamelCase )
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
lowercase_ : Optional[int] = CLIPSegProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowercase_ : List[Any] = self.get_tokenizer(bos_token='(BOS)' ,eos_token='(EOS)' )
lowercase_ : Any = self.get_image_processor(do_normalize=__UpperCamelCase ,padding_value=1.0 )
lowercase_ : Any = CLIPSegProcessor.from_pretrained(
self.tmpdirname ,bos_token='(BOS)' ,eos_token='(EOS)' ,do_normalize=__UpperCamelCase ,padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer ,__UpperCamelCase )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor ,__UpperCamelCase )
def _UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
lowercase_ : Dict = self.get_image_processor()
lowercase_ : List[str] = self.get_tokenizer()
lowercase_ : List[str] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase )
lowercase_ : List[Any] = self.prepare_image_inputs()
lowercase_ : str = image_processor(__UpperCamelCase ,return_tensors='np' )
lowercase_ : Union[str, Any] = processor(images=__UpperCamelCase ,return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1e-2 )
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
lowercase_ : Dict = self.get_image_processor()
lowercase_ : List[Any] = self.get_tokenizer()
lowercase_ : List[Any] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase )
lowercase_ : Dict = 'lower newer'
lowercase_ : Any = processor(text=__UpperCamelCase )
lowercase_ : int = tokenizer(__UpperCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : str = self.get_image_processor()
lowercase_ : str = self.get_tokenizer()
lowercase_ : int = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase )
lowercase_ : List[Any] = 'lower newer'
lowercase_ : str = self.prepare_image_inputs()
lowercase_ : Optional[int] = processor(text=__UpperCamelCase ,images=__UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) ,['input_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(__UpperCamelCase ):
processor()
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
lowercase_ : Tuple = self.get_image_processor()
lowercase_ : Optional[Any] = self.get_tokenizer()
lowercase_ : List[str] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase )
lowercase_ : Optional[int] = self.prepare_image_inputs()
lowercase_ : Optional[Any] = self.prepare_image_inputs()
lowercase_ : int = processor(images=__UpperCamelCase ,visual_prompt=__UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) ,['pixel_values', 'conditional_pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(__UpperCamelCase ):
processor()
def _UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
lowercase_ : List[str] = self.get_image_processor()
lowercase_ : Optional[Any] = self.get_tokenizer()
lowercase_ : int = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase )
lowercase_ : Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowercase_ : List[str] = processor.batch_decode(__UpperCamelCase )
lowercase_ : Optional[Any] = tokenizer.batch_decode(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase ,__UpperCamelCase )
| 321
| 1
|
"""simple docstring"""
def lowercase__( __SCREAMING_SNAKE_CASE : int ):
if divisor % 5 == 0 or divisor % 2 == 0:
return 0
lowercase_ : str = 1
lowercase_ : Tuple = 1
while repunit:
lowercase_ : Any = (10 * repunit + 1) % divisor
repunit_index += 1
return repunit_index
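# The function above computes A(n), the length of the smallest repunit divisible by
# the given divisor (Project Euler 129). It iterates R(k + 1) = (10 * R(k) + 1) mod
# divisor, so only the residue is ever stored.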
def lowercase__( __SCREAMING_SNAKE_CASE : int = 1_00_00_00 ):
lowercase_ : Any = limit - 1
if divisor % 2 == 0:
divisor += 1
while least_divisible_repunit(__SCREAMING_SNAKE_CASE ) <= limit:
divisor += 2
return divisor
if __name__ == "__main__":
print(F"{solution() = }")
| 321
|
"""simple docstring"""
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 321
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
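# The mapping below declares the public names of each submodule; _LazyModule then
# defers the heavy torch/tf imports until an attribute is first accessed.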
__SCREAMING_SNAKE_CASE ={"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE =[
"SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
"SwinForImageClassification",
"SwinForMaskedImageModeling",
"SwinModel",
"SwinPreTrainedModel",
"SwinBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE =[
"TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFSwinForImageClassification",
"TFSwinForMaskedImageModeling",
"TFSwinModel",
"TFSwinPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 321
|
"""simple docstring"""
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class UpperCamelCase :
def __init__( self ,__UpperCamelCase ,__UpperCamelCase=13 ,__UpperCamelCase=7 ,__UpperCamelCase=True ,__UpperCamelCase=True ,__UpperCamelCase=99 ,__UpperCamelCase=32 ,__UpperCamelCase=5 ,__UpperCamelCase=4 ,__UpperCamelCase=37 ,__UpperCamelCase="gelu" ,__UpperCamelCase=0.1 ,__UpperCamelCase=0.1 ,__UpperCamelCase=50 ,__UpperCamelCase=0.02 ,__UpperCamelCase=True ,__UpperCamelCase=None ,) -> List[str]:
'''simple docstring'''
lowercase_ : Dict = parent
lowercase_ : Tuple = batch_size
lowercase_ : List[Any] = seq_length
lowercase_ : Optional[Any] = is_training
lowercase_ : Any = use_input_mask
lowercase_ : Optional[Any] = vocab_size
lowercase_ : str = hidden_size
lowercase_ : Any = num_hidden_layers
lowercase_ : Dict = num_attention_heads
lowercase_ : Optional[int] = intermediate_size
lowercase_ : Any = hidden_act
lowercase_ : Optional[Any] = hidden_dropout_prob
lowercase_ : str = attention_probs_dropout_prob
lowercase_ : Any = max_position_embeddings
lowercase_ : Optional[Any] = initializer_range
lowercase_ : Union[str, Any] = use_labels
lowercase_ : Union[str, Any] = scope
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
lowercase_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
lowercase_ : List[str] = None
if self.use_input_mask:
lowercase_ : Dict = random_attention_mask([self.batch_size, self.seq_length] )
if self.use_labels:
lowercase_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
lowercase_ : Any = self.get_config()
return config, input_ids, input_mask, token_labels
def _UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
return BertGenerationConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,is_decoder=__UpperCamelCase ,initializer_range=self.initializer_range ,)
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
        lowercase_ , lowercase_ , lowercase_ , lowercase_ : str = self.prepare_config_and_inputs()
lowercase_ : int = True
lowercase_ : Union[str, Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowercase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 )
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ,) -> Any:
'''simple docstring'''
lowercase_ : Optional[Any] = BertGenerationEncoder(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowercase_ : List[Any] = model(__UpperCamelCase ,attention_mask=__UpperCamelCase )
lowercase_ : Optional[Any] = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ,) -> Optional[Any]:
'''simple docstring'''
lowercase_ : Optional[Any] = True
lowercase_ : str = BertGenerationEncoder(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowercase_ : Union[str, Any] = model(
__UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,encoder_attention_mask=__UpperCamelCase ,)
lowercase_ : Dict = model(
__UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,)
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ,) -> int:
'''simple docstring'''
lowercase_ : List[str] = True
lowercase_ : Union[str, Any] = True
lowercase_ : int = BertGenerationDecoder(config=__UpperCamelCase ).to(__UpperCamelCase ).eval()
# first forward pass
lowercase_ : str = model(
__UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,encoder_attention_mask=__UpperCamelCase ,use_cache=__UpperCamelCase ,)
lowercase_ : Dict = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
lowercase_ : Union[str, Any] = ids_tensor((self.batch_size, 3) ,config.vocab_size )
lowercase_ : Dict = ids_tensor((self.batch_size, 3) ,vocab_size=2 )
        # append to next input_ids and attention mask
lowercase_ : Tuple = torch.cat([input_ids, next_tokens] ,dim=-1 )
lowercase_ : Any = torch.cat([input_mask, next_mask] ,dim=-1 )
lowercase_ : int = model(
__UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,encoder_attention_mask=__UpperCamelCase ,output_hidden_states=__UpperCamelCase ,)['hidden_states'][0]
lowercase_ : List[Any] = model(
__UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,encoder_attention_mask=__UpperCamelCase ,past_key_values=__UpperCamelCase ,output_hidden_states=__UpperCamelCase ,)['hidden_states'][0]
# select random slice
lowercase_ : int = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
lowercase_ : List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
lowercase_ : int = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__UpperCamelCase ,__UpperCamelCase ,atol=1e-3 ) )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,*__UpperCamelCase ,) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : List[str] = BertGenerationDecoder(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowercase_ : Dict = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ,labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ , lowercase_ , lowercase_ , lowercase_ : Union[str, Any] = self.prepare_config_and_inputs()
lowercase_ : Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , unittest.TestCase ):
lowercase = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
lowercase = (BertGenerationDecoder,) if is_torch_available() else ()
lowercase = (
{'feature-extraction': BertGenerationEncoder, 'text-generation': BertGenerationDecoder}
if is_torch_available()
else {}
)
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
lowercase_ : Optional[Any] = BertGenerationEncoderTester(self )
lowercase_ : Tuple = ConfigTester(self ,config_class=__UpperCamelCase ,hidden_size=37 )
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
lowercase_ , lowercase_ , lowercase_ , lowercase_ : int = self.model_tester.prepare_config_and_inputs()
lowercase_ : Optional[int] = 'bert'
self.model_tester.create_and_check_model(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*__UpperCamelCase )
def _UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
lowercase_ : int = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*__UpperCamelCase )
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
        lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_decoder()
lowercase_ : Optional[Any] = None
self.model_tester.create_and_check_model_as_decoder(
__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,)
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ : str = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*__UpperCamelCase )
@slow
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
lowercase_ : int = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
self.assertIsNotNone(__UpperCamelCase )
@require_torch
class UpperCamelCase ( unittest.TestCase ):
@slow
def _UpperCAmelCase ( self ) -> int:
'''simple docstring'''
lowercase_ : Tuple = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
lowercase_ : List[Any] = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] )
with torch.no_grad():
lowercase_ : Tuple = model(__UpperCamelCase )[0]
lowercase_ : Dict = torch.Size([1, 8, 1024] )
self.assertEqual(output.shape ,__UpperCamelCase )
lowercase_ : str = torch.tensor(
[[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) )
@require_torch
class UpperCamelCase ( unittest.TestCase ):
@slow
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ : str = BertGenerationDecoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
lowercase_ : Dict = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] )
with torch.no_grad():
lowercase_ : Dict = model(__UpperCamelCase )[0]
lowercase_ : Optional[int] = torch.Size([1, 8, 5_0358] )
self.assertEqual(output.shape ,__UpperCamelCase )
lowercase_ : Dict = torch.tensor(
[[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) )
| 321
| 1
|
"""simple docstring"""
def lowercase__( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : str ):
lowercase_ : Tuple = ''
for i in table:
res += inp[i - 1]
return res
def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] ):
return data[1:] + data[0]
def lowercase__( __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : str ):
lowercase_ : Any = ''
for i in range(len(__SCREAMING_SNAKE_CASE ) ):
if a[i] == b[i]:
res += "0"
else:
res += "1"
return res
def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any ):
lowercase_ : List[Any] = int('0b' + data[0] + data[-1] , 2 )
lowercase_ : Tuple = int('0b' + data[1:3] , 2 )
return bin(s[row][col] )[2:]
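# S-box lookup above: the outer bits (first and last) select the row and the two
# middle bits select the column, following the standard S-DES convention.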
def lowercase__( __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Union[str, Any] ):
lowercase_ : Optional[int] = message[:4]
lowercase_ : List[str] = message[4:]
lowercase_ : Optional[Any] = apply_table(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase_ : str = xor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase_ : Union[str, Any] = apply_sbox(__SCREAMING_SNAKE_CASE , temp[:4] ) # noqa: E741
lowercase_ : List[Any] = apply_sbox(__SCREAMING_SNAKE_CASE , temp[4:] )
lowercase_ : str = '0' * (2 - len(__SCREAMING_SNAKE_CASE )) + l # noqa: E741
lowercase_ : Union[str, Any] = '0' * (2 - len(__SCREAMING_SNAKE_CASE )) + r
lowercase_ : Any = apply_table(l + r , __SCREAMING_SNAKE_CASE )
lowercase_ : int = xor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return temp + right
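# The function above implements one Feistel round of simplified DES (S-DES): the
# right half is expanded and permuted, XOR-ed with the round key, passed through
# the two S-boxes, permuted again, and finally XOR-ed into the left half.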
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE =input("Enter 10 bit key: ")
__SCREAMING_SNAKE_CASE =input("Enter 8 bit message: ")
__SCREAMING_SNAKE_CASE =[6, 3, 7, 4, 8, 5, 10, 9]
__SCREAMING_SNAKE_CASE =[3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
__SCREAMING_SNAKE_CASE =[2, 4, 3, 1]
__SCREAMING_SNAKE_CASE =[2, 6, 3, 1, 4, 8, 5, 7]
__SCREAMING_SNAKE_CASE =[4, 1, 3, 5, 7, 2, 8, 6]
__SCREAMING_SNAKE_CASE =[4, 1, 2, 3, 2, 3, 4, 1]
__SCREAMING_SNAKE_CASE =[[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
__SCREAMING_SNAKE_CASE =[[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
__SCREAMING_SNAKE_CASE =apply_table(key, paa_table)
__SCREAMING_SNAKE_CASE =temp[:5]
__SCREAMING_SNAKE_CASE =temp[5:]
__SCREAMING_SNAKE_CASE =left_shift(left)
__SCREAMING_SNAKE_CASE =left_shift(right)
__SCREAMING_SNAKE_CASE =apply_table(left + right, pa_table)
__SCREAMING_SNAKE_CASE =left_shift(left)
__SCREAMING_SNAKE_CASE =left_shift(right)
__SCREAMING_SNAKE_CASE =left_shift(left)
__SCREAMING_SNAKE_CASE =left_shift(right)
__SCREAMING_SNAKE_CASE =apply_table(left + right, pa_table)
# encryption
__SCREAMING_SNAKE_CASE =apply_table(message, IP)
__SCREAMING_SNAKE_CASE =function(expansion, sa, sa, keya, temp)
__SCREAMING_SNAKE_CASE =temp[4:] + temp[:4]
__SCREAMING_SNAKE_CASE =function(expansion, sa, sa, keya, temp)
__SCREAMING_SNAKE_CASE =apply_table(temp, IP_inv)
print("Cipher text is:", CT)
# decryption
__SCREAMING_SNAKE_CASE =apply_table(CT, IP)
__SCREAMING_SNAKE_CASE =function(expansion, sa, sa, keya, temp)
__SCREAMING_SNAKE_CASE =temp[4:] + temp[:4]
__SCREAMING_SNAKE_CASE =function(expansion, sa, sa, keya, temp)
__SCREAMING_SNAKE_CASE =apply_table(temp, IP_inv)
print("Plain text after decypting is:", PT)
| 321
|
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class UpperCamelCase :
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> int:
'''simple docstring'''
return None
class UpperCamelCase :
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> str:
'''simple docstring'''
return None
class UpperCamelCase ( unittest.TestCase ):
lowercase = [
# (model_name, model_kwargs)
('bert-base-cased', {}),
('gpt2', {'use_cache': False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(__UpperCamelCase ,'tf' ,12 ,**__UpperCamelCase )
@require_torch
@slow
def _UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(__UpperCamelCase ,'pt' ,12 ,**__UpperCamelCase )
@require_torch
@slow
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
from transformers import BertModel
lowercase_ : Union[str, Any] = ['[UNK]', '[SEP]', '[CLS]', '[PAD]', '[MASK]', 'some', 'other', 'words']
with NamedTemporaryFile(mode='w+t' ) as vocab_file:
vocab_file.write('\n'.join(__UpperCamelCase ) )
vocab_file.flush()
lowercase_ : List[str] = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
lowercase_ : Optional[Any] = BertModel(BertConfig(vocab_size=len(__UpperCamelCase ) ) )
model.save_pretrained(__UpperCamelCase )
self._test_export(__UpperCamelCase ,'pt' ,12 ,__UpperCamelCase )
@require_tf
@slow
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowercase_ : Optional[int] = self._test_export(__UpperCamelCase ,'tf' ,12 ,**__UpperCamelCase )
lowercase_ : int = quantize(Path(__UpperCamelCase ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(__UpperCamelCase ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
@require_torch
@slow
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowercase_ : Tuple = self._test_export(__UpperCamelCase ,'pt' ,12 ,**__UpperCamelCase )
lowercase_ : Tuple = quantize(__UpperCamelCase )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(__UpperCamelCase ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=None ,**__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
try:
# Compute path
with TemporaryDirectory() as tempdir:
lowercase_ : Dict = Path(__UpperCamelCase ).joinpath('model.onnx' )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase )
return path
except Exception as e:
self.fail(__UpperCamelCase )
@require_torch
@require_tokenizers
@slow
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
from transformers import BertModel
lowercase_ : List[Any] = BertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
lowercase_ : Union[str, Any] = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
self._test_infer_dynamic_axis(__UpperCamelCase ,__UpperCamelCase ,'pt' )
@require_tf
@require_tokenizers
@slow
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
from transformers import TFBertModel
lowercase_ : Optional[Any] = TFBertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
lowercase_ : Any = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
self._test_infer_dynamic_axis(__UpperCamelCase ,__UpperCamelCase ,'tf' )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Dict:
'''simple docstring'''
lowercase_ : Tuple = FeatureExtractionPipeline(__UpperCamelCase ,__UpperCamelCase )
lowercase_ : Dict = ['input_ids', 'token_type_ids', 'attention_mask', 'output_0', 'output_1']
lowercase_ , lowercase_ , lowercase_ , lowercase_ : Any = infer_shapes(__UpperCamelCase ,__UpperCamelCase )
# Assert all variables are present
self.assertEqual(len(__UpperCamelCase ) ,len(__UpperCamelCase ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3] ,__UpperCamelCase )
self.assertSequenceEqual(variable_names[3:] ,__UpperCamelCase )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] ,{0: 'batch', 1: 'sequence'} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes['output_0'] ,{0: 'batch', 1: 'sequence'} )
self.assertDictEqual(shapes['output_1'] ,{0: 'batch'} )
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
lowercase_ : Any = ['input_ids', 'attention_mask', 'token_type_ids']
lowercase_ : List[Any] = {'input_ids': [1, 2, 3, 4], 'attention_mask': [0, 0, 0, 0], 'token_type_ids': [1, 1, 1, 1]}
lowercase_ , lowercase_ : int = ensure_valid_input(FuncContiguousArgs() ,__UpperCamelCase ,__UpperCamelCase )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(__UpperCamelCase ) ,3 )
# Should have exactly the same input names
self.assertEqual(set(__UpperCamelCase ) ,set(__UpperCamelCase ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(__UpperCamelCase ,(tokens['input_ids'], tokens['token_type_ids'], tokens['attention_mask']) )
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
lowercase_ , lowercase_ : Optional[int] = ensure_valid_input(FuncNonContiguousArgs() ,__UpperCamelCase ,__UpperCamelCase )
        # Should keep exactly one arg (everything before the first argument not provided, "some_other_args")
self.assertEqual(len(__UpperCamelCase ) ,1 )
self.assertEqual(len(__UpperCamelCase ) ,1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] ,tokens['input_ids'] )
self.assertEqual(ordered_input_names[0] ,'input_ids' )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : Dict = generate_identified_filename(Path('/home/something/my_fake_model.onnx' ) ,'-test' )
self.assertEqual('/home/something/my_fake_model-test.onnx' ,generated.as_posix() )
| 321
| 1
|
"""simple docstring"""
import datasets
from .evaluate import evaluate
__SCREAMING_SNAKE_CASE ="\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n"
__SCREAMING_SNAKE_CASE ="\nThis metric wrap the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n"
__SCREAMING_SNAKE_CASE ="\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair as given in the references (see below)\n - 'prediction_text': list of possible texts for the answer, as a list of strings\n depending on a threshold on the confidence probability of each prediction.\n references: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair (see above),\n - 'answers': a Dict in the CUAD dataset format\n {\n 'text': list of possible texts for the answer, as a list of strings\n 'answer_start': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n 'exact_match': Exact match (the normalized answer exactly match the gold answer)\n 'f1': The F-score of predicted tokens versus the gold answer\n 'aupr': Area Under the Precision-Recall curve\n 'prec_at_80_recall': Precision at 80% recall\n 'prec_at_90_recall': Precision at 90% recall\nExamples:\n >>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]\n >>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]\n >>> cuad_metric = datasets.load_metric(\"cuad\")\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase ( datasets.Metric ):
def _UpperCAmelCase ( self ) -> int:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': {
'id': datasets.Value('string' ),
'prediction_text': datasets.features.Sequence(datasets.Value('string' ) ),
},
'references': {
'id': datasets.Value('string' ),
'answers': datasets.features.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
},
} ) ,codebase_urls=['https://www.atticusprojectai.org/cuad'] ,reference_urls=['https://www.atticusprojectai.org/cuad'] ,)
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> Tuple:
'''simple docstring'''
lowercase_ : str = {prediction['id']: prediction['prediction_text'] for prediction in predictions}
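        # Re-wrap the flat references into the nested SQuAD-style layout that the
        # official CUAD evaluate() script expects.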
lowercase_ : Union[str, Any] = [
{
'paragraphs': [
{
'qas': [
{
'answers': [{'text': answer_text} for answer_text in ref['answers']['text']],
'id': ref['id'],
}
for ref in references
]
}
]
}
]
lowercase_ : Optional[int] = evaluate(dataset=__UpperCamelCase ,predictions=__UpperCamelCase )
return score
| 321
|
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class UpperCamelCase ( unittest.TestCase ):
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
lowercase_ : Union[str, Any] = [[1, 2, 4], [1, 2, 3, 4]]
lowercase_ : List[Any] = DisjunctiveConstraint(__UpperCamelCase )
self.assertTrue(isinstance(dc.token_ids ,__UpperCamelCase ) )
with self.assertRaises(__UpperCamelCase ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(__UpperCamelCase ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
lowercase_ : List[Any] = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(__UpperCamelCase ):
DisjunctiveConstraint(__UpperCamelCase ) # fails here
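        # [1, 2] is a strict prefix of [1, 2, 3, 4]; DisjunctiveConstraint rejects
        # candidate sets where one sequence is a subset of another, since the shorter
        # sequence would complete first and make the longer one unreachable.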
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
lowercase_ : Optional[int] = [[1, 2, 3], [1, 2, 4]]
lowercase_ : Dict = DisjunctiveConstraint(__UpperCamelCase )
lowercase_ , lowercase_ , lowercase_ : Union[str, Any] = dc.update(1 )
lowercase_ : str = stepped is True and completed is False and reset is False
self.assertTrue(__UpperCamelCase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
lowercase_ , lowercase_ , lowercase_ : Optional[Any] = dc.update(2 )
lowercase_ : Any = stepped is True and completed is False and reset is False
self.assertTrue(__UpperCamelCase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
lowercase_ , lowercase_ , lowercase_ : Tuple = dc.update(3 )
lowercase_ : Union[str, Any] = stepped is True and completed is True and reset is False
self.assertTrue(__UpperCamelCase )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
lowercase_ : List[str] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
lowercase_ : Union[str, Any] = DisjunctiveConstraint(__UpperCamelCase )
lowercase_ , lowercase_ , lowercase_ : Optional[int] = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
lowercase_ , lowercase_ , lowercase_ : int = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
lowercase_ , lowercase_ , lowercase_ : str = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
lowercase_ , lowercase_ , lowercase_ : List[str] = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
lowercase_ , lowercase_ , lowercase_ : Optional[int] = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
lowercase_ , lowercase_ , lowercase_ : int = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
lowercase_ , lowercase_ , lowercase_ : Dict = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
| 321
| 1
|
"""simple docstring"""
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class UpperCamelCase ( lowercase_ ):
lowercase = ['image_processor', 'tokenizer']
lowercase = 'BlipImageProcessor'
lowercase = 'AutoTokenizer'
def __init__( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> int:
'''simple docstring'''
super().__init__(__UpperCamelCase ,__UpperCamelCase )
# add QFormer tokenizer
lowercase_ : Any = qformer_tokenizer
def __call__( self ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = True ,__UpperCamelCase = False ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = 0 ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = False ,__UpperCamelCase = False ,__UpperCamelCase = False ,__UpperCamelCase = False ,__UpperCamelCase = False ,__UpperCamelCase = True ,__UpperCamelCase = None ,**__UpperCamelCase ,) -> BatchFeature:
'''simple docstring'''
if images is None and text is None:
raise ValueError('You have to specify at least images or text.' )
lowercase_ : Union[str, Any] = BatchFeature()
if text is not None:
lowercase_ : Tuple = self.tokenizer(
text=__UpperCamelCase ,add_special_tokens=__UpperCamelCase ,padding=__UpperCamelCase ,truncation=__UpperCamelCase ,max_length=__UpperCamelCase ,stride=__UpperCamelCase ,pad_to_multiple_of=__UpperCamelCase ,return_attention_mask=__UpperCamelCase ,return_overflowing_tokens=__UpperCamelCase ,return_special_tokens_mask=__UpperCamelCase ,return_offsets_mapping=__UpperCamelCase ,return_token_type_ids=__UpperCamelCase ,return_length=__UpperCamelCase ,verbose=__UpperCamelCase ,return_tensors=__UpperCamelCase ,**__UpperCamelCase ,)
encoding.update(__UpperCamelCase )
lowercase_ : List[str] = self.qformer_tokenizer(
text=__UpperCamelCase ,add_special_tokens=__UpperCamelCase ,padding=__UpperCamelCase ,truncation=__UpperCamelCase ,max_length=__UpperCamelCase ,stride=__UpperCamelCase ,pad_to_multiple_of=__UpperCamelCase ,return_attention_mask=__UpperCamelCase ,return_overflowing_tokens=__UpperCamelCase ,return_special_tokens_mask=__UpperCamelCase ,return_offsets_mapping=__UpperCamelCase ,return_token_type_ids=__UpperCamelCase ,return_length=__UpperCamelCase ,verbose=__UpperCamelCase ,return_tensors=__UpperCamelCase ,**__UpperCamelCase ,)
lowercase_ : Optional[Any] = qformer_text_encoding.pop('input_ids' )
lowercase_ : Union[str, Any] = qformer_text_encoding.pop('attention_mask' )
if images is not None:
lowercase_ : str = self.image_processor(__UpperCamelCase ,return_tensors=__UpperCamelCase )
encoding.update(__UpperCamelCase )
return encoding
def _UpperCAmelCase ( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> int:
'''simple docstring'''
return self.tokenizer.batch_decode(*__UpperCamelCase ,**__UpperCamelCase )
def _UpperCAmelCase ( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> Any:
'''simple docstring'''
return self.tokenizer.decode(*__UpperCamelCase ,**__UpperCamelCase )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def _UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ : str = self.tokenizer.model_input_names
lowercase_ : Optional[Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
def _UpperCAmelCase ( self ,__UpperCamelCase ,**__UpperCamelCase ) -> Any:
'''simple docstring'''
if os.path.isfile(__UpperCamelCase ):
raise ValueError(f'''Provided path ({save_directory}) should be a directory, not a file''' )
os.makedirs(__UpperCamelCase ,exist_ok=__UpperCamelCase )
lowercase_ : List[Any] = os.path.join(__UpperCamelCase ,'qformer_tokenizer' )
self.qformer_tokenizer.save_pretrained(__UpperCamelCase )
return super().save_pretrained(__UpperCamelCase ,**__UpperCamelCase )
@classmethod
def _UpperCAmelCase ( cls ,__UpperCamelCase ,**__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
lowercase_ : List[Any] = AutoTokenizer.from_pretrained(__UpperCamelCase ,subfolder='qformer_tokenizer' )
lowercase_ : List[str] = cls._get_arguments_from_pretrained(__UpperCamelCase ,**__UpperCamelCase )
args.append(__UpperCamelCase )
return cls(*__UpperCamelCase )
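# Hedged usage sketch for the processor above. The checkpoint name and image
# path are placeholders, and the public method names mirror the intended
# ProcessorMixin API rather than the obfuscated names in this file. The prompt
# is tokenized twice -- once for the language model and once for the
# Q-Former -- and the image processor contributes pixel_values.
#
# from PIL import Image
# processor = UpperCamelCase.from_pretrained("some/instructblip-checkpoint")
# inputs = processor(
#     images=Image.open("photo.jpg"), text="What is shown here?", return_tensors="pt"
# )
# inputs holds input_ids/attention_mask, qformer_input_ids/qformer_attention_mask,
# and pixel_values.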
| 321
|
"""simple docstring"""
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def lowercase__( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str ):
def get_masked_lm_array(__SCREAMING_SNAKE_CASE : str ):
lowercase_ : int = F'''masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
lowercase_ : str = tf.train.load_variable(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if "kernel" in name:
lowercase_ : List[Any] = array.transpose()
return torch.from_numpy(__SCREAMING_SNAKE_CASE )
def get_encoder_array(__SCREAMING_SNAKE_CASE : str ):
lowercase_ : Tuple = F'''encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
lowercase_ : Optional[Any] = tf.train.load_variable(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if "kernel" in name:
lowercase_ : Tuple = array.transpose()
return torch.from_numpy(__SCREAMING_SNAKE_CASE )
def get_encoder_layer_array(__SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str ):
lowercase_ : List[Any] = F'''encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
lowercase_ : List[Any] = tf.train.load_variable(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if "kernel" in name:
lowercase_ : List[str] = array.transpose()
return torch.from_numpy(__SCREAMING_SNAKE_CASE )
def get_encoder_attention_layer_array(__SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[Any] ):
lowercase_ : List[Any] = F'''encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
lowercase_ : Optional[Any] = tf.train.load_variable(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = array.reshape(__SCREAMING_SNAKE_CASE )
if "kernel" in name:
lowercase_ : List[str] = array.transpose()
return torch.from_numpy(__SCREAMING_SNAKE_CASE )
print(F'''Loading model based on config from {config_path}...''' )
lowercase_ : Any = BertConfig.from_json_file(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = BertForMaskedLM(__SCREAMING_SNAKE_CASE )
# Layers
for layer_index in range(0 , config.num_hidden_layers ):
lowercase_ : BertLayer = model.bert.encoder.layer[layer_index]
# Self-attention
lowercase_ : BertSelfAttention = layer.attention.self
lowercase_ : str = get_encoder_attention_layer_array(
__SCREAMING_SNAKE_CASE , '_query_dense/kernel' , self_attn.query.weight.data.shape )
lowercase_ : Tuple = get_encoder_attention_layer_array(
__SCREAMING_SNAKE_CASE , '_query_dense/bias' , self_attn.query.bias.data.shape )
lowercase_ : Tuple = get_encoder_attention_layer_array(
__SCREAMING_SNAKE_CASE , '_key_dense/kernel' , self_attn.key.weight.data.shape )
lowercase_ : int = get_encoder_attention_layer_array(
__SCREAMING_SNAKE_CASE , '_key_dense/bias' , self_attn.key.bias.data.shape )
lowercase_ : Dict = get_encoder_attention_layer_array(
__SCREAMING_SNAKE_CASE , '_value_dense/kernel' , self_attn.value.weight.data.shape )
lowercase_ : List[Any] = get_encoder_attention_layer_array(
__SCREAMING_SNAKE_CASE , '_value_dense/bias' , self_attn.value.bias.data.shape )
# Self-attention Output
lowercase_ : BertSelfOutput = layer.attention.output
lowercase_ : Dict = get_encoder_attention_layer_array(
__SCREAMING_SNAKE_CASE , '_output_dense/kernel' , self_output.dense.weight.data.shape )
lowercase_ : Any = get_encoder_attention_layer_array(
__SCREAMING_SNAKE_CASE , '_output_dense/bias' , self_output.dense.bias.data.shape )
lowercase_ : Tuple = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_attention_layer_norm/gamma' )
lowercase_ : Any = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_attention_layer_norm/beta' )
# Intermediate
lowercase_ : BertIntermediate = layer.intermediate
lowercase_ : Optional[Any] = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_intermediate_dense/kernel' )
lowercase_ : Optional[int] = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_intermediate_dense/bias' )
# Output
lowercase_ : BertOutput = layer.output
lowercase_ : Any = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_output_dense/kernel' )
lowercase_ : Optional[Any] = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_output_dense/bias' )
lowercase_ : List[str] = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_output_layer_norm/gamma' )
lowercase_ : int = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_output_layer_norm/beta' )
# Embeddings
lowercase_ : Optional[Any] = get_encoder_array('_position_embedding_layer/embeddings' )
lowercase_ : int = get_encoder_array('_type_embedding_layer/embeddings' )
lowercase_ : Any = get_encoder_array('_embedding_norm_layer/gamma' )
lowercase_ : Optional[Any] = get_encoder_array('_embedding_norm_layer/beta' )
# LM Head
lowercase_ : int = model.cls.predictions.transform
lowercase_ : str = get_masked_lm_array('dense/kernel' )
lowercase_ : Optional[Any] = get_masked_lm_array('dense/bias' )
lowercase_ : Optional[Any] = get_masked_lm_array('layer_norm/gamma' )
lowercase_ : Optional[int] = get_masked_lm_array('layer_norm/beta' )
lowercase_ : List[str] = get_masked_lm_array('embedding_table' )
# Pooling
lowercase_ : Optional[Any] = BertPooler(config=__SCREAMING_SNAKE_CASE )
lowercase_ : BertPooler = get_encoder_array('_pooler_layer/kernel' )
lowercase_ : BertPooler = get_encoder_array('_pooler_layer/bias' )
# Export final model
model.save_pretrained(__SCREAMING_SNAKE_CASE )
# Integration test - should load without any errors ;)
lowercase_ : Tuple = BertForMaskedLM.from_pretrained(__SCREAMING_SNAKE_CASE )
print(new_model.eval() )
    print('Model conversion was done successfully!' )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
parser.add_argument(
"--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow Token Dropping checkpoint path."
)
parser.add_argument(
"--bert_config_file",
type=str,
required=True,
help="The config json file corresponding to the BERT model. This specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path",
type=str,
required=True,
help="Path to the output PyTorch model.",
)
__SCREAMING_SNAKE_CASE =parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
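# Example invocation of the converter above; the script filename and every
# path below are placeholders, not real checkpoints:
#
#   python convert_token_dropping_checkpoint.py \
#       --tf_checkpoint_path /tmp/token_dropping/ckpt \
#       --bert_config_file /tmp/token_dropping/bert_config.json \
#       --pytorch_dump_path /tmp/token_dropping/pytorch_model
#
# The same conversion can be run programmatically by calling the function
# defined above (named lowercase__ in this file) with the three paths in the
# same order.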
| 321
| 1
|
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class UpperCamelCase ( unittest.TestCase ):
@slow
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
lowercase_ : Optional[Any] = XLMRobertaModel.from_pretrained('xlm-roberta-base' )
lowercase_ : Dict = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]] )
# The dog is cute and lives in the garden house
lowercase_ : Dict = torch.Size((1, 12, 768) ) # batch_size, sequence_length, embedding_vector_dim
lowercase_ : List[str] = torch.tensor(
[[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
lowercase_ : Union[str, Any] = model(__UpperCamelCase )['last_hidden_state'].detach()
self.assertEqual(output.shape ,__UpperCamelCase )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] ,__UpperCamelCase ,atol=1e-3 ) )
@slow
def _UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : Any = XLMRobertaModel.from_pretrained('xlm-roberta-large' )
lowercase_ : Optional[Any] = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]] )
# The dog is cute and lives in the garden house
lowercase_ : Dict = torch.Size((1, 12, 1024) ) # batch_size, sequence_length, embedding_vector_dim
lowercase_ : Tuple = torch.tensor(
[[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
lowercase_ : List[Any] = model(__UpperCamelCase )['last_hidden_state'].detach()
self.assertEqual(output.shape ,__UpperCamelCase )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] ,__UpperCamelCase ,atol=1e-3 ) )
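# Hedged sketch of how the hard-coded input_ids above can be reproduced with a
# tokenizer (the checkpoint name is assumed to match the model under test):
if is_torch_available():
    from transformers import AutoTokenizer

    def _example_input_ids():
        tok = AutoTokenizer.from_pretrained('xlm-roberta-base' )
        return tok('The dog is cute and lives in the garden house' ,return_tensors='pt' ).input_ids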
| 321
|
"""simple docstring"""
from collections import namedtuple
import requests
from lxml import html # type: ignore
__SCREAMING_SNAKE_CASE =namedtuple("covid_data", "cases deaths recovered")
def lowercase__( __SCREAMING_SNAKE_CASE : str = "https://www.worldometers.info/coronavirus/" ):
lowercase_ : Union[str, Any] = '//div[@class = "maincounter-number"]/span/text()'
return covid_data(*html.fromstring(requests.get(__SCREAMING_SNAKE_CASE ).content ).xpath(__SCREAMING_SNAKE_CASE ) )
__SCREAMING_SNAKE_CASE ="Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}"
print(fmt.format(*covid_stats()))
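# Minimal named-field access for the same scrape; `stats` is a local name
# introduced only for this example, and it assumes the worldometers markup
# still exposes exactly three "maincounter-number" spans:
stats = lowercase__()
print(f"cases={stats.cases}, deaths={stats.deaths}, recovered={stats.recovered}")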
| 321
| 1
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE ="▁"
__SCREAMING_SNAKE_CASE ={"vocab_file": "sentencepiece.bpe.model"}
__SCREAMING_SNAKE_CASE ={
"vocab_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model"
),
}
}
__SCREAMING_SNAKE_CASE ={
"facebook/nllb-200-distilled-600M": 1024,
}
# fmt: off
__SCREAMING_SNAKE_CASE =["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class UpperCamelCase ( lowercase_ ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = ['input_ids', 'attention_mask']
lowercase = []
lowercase = []
def __init__( self ,__UpperCamelCase ,__UpperCamelCase="<s>" ,__UpperCamelCase="</s>" ,__UpperCamelCase="</s>" ,__UpperCamelCase="<s>" ,__UpperCamelCase="<unk>" ,__UpperCamelCase="<pad>" ,__UpperCamelCase="<mask>" ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase = None ,__UpperCamelCase=None ,__UpperCamelCase=False ,**__UpperCamelCase ,) -> Optional[int]:
'''simple docstring'''
lowercase_ : Dict = AddedToken(__UpperCamelCase ,lstrip=__UpperCamelCase ,rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase ,__UpperCamelCase ) else mask_token
lowercase_ : Dict = {} if sp_model_kwargs is None else sp_model_kwargs
lowercase_ : Dict = legacy_behaviour
super().__init__(
bos_token=__UpperCamelCase ,eos_token=__UpperCamelCase ,unk_token=__UpperCamelCase ,sep_token=__UpperCamelCase ,cls_token=__UpperCamelCase ,pad_token=__UpperCamelCase ,mask_token=__UpperCamelCase ,tokenizer_file=__UpperCamelCase ,src_lang=__UpperCamelCase ,tgt_lang=__UpperCamelCase ,additional_special_tokens=__UpperCamelCase ,sp_model_kwargs=self.sp_model_kwargs ,legacy_behaviour=__UpperCamelCase ,**__UpperCamelCase ,)
lowercase_ : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__UpperCamelCase ) )
lowercase_ : int = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
        # Mimic fairseq token-to-id alignment for the first 4 tokens
lowercase_ : Dict = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
lowercase_ : List[Any] = 1
lowercase_ : Union[str, Any] = len(self.sp_model )
lowercase_ : str = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(__UpperCamelCase )
}
lowercase_ : Union[str, Any] = {v: k for k, v in self.lang_code_to_id.items()}
lowercase_ : Any = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
lowercase_ : Tuple = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
lowercase_ : List[Any] = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
lowercase_ : str = src_lang if src_lang is not None else 'eng_Latn'
lowercase_ : Dict = self.lang_code_to_id[self._src_lang]
lowercase_ : Optional[int] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self ) -> str:
'''simple docstring'''
lowercase_ : List[Any] = self.__dict__.copy()
lowercase_ : List[str] = None
lowercase_ : int = self.sp_model.serialized_model_proto()
return state
def __setstate__( self ,__UpperCamelCase ) -> Any:
'''simple docstring'''
lowercase_ : Optional[Any] = d
# for backward compatibility
if not hasattr(self ,'sp_model_kwargs' ):
lowercase_ : List[str] = {}
lowercase_ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
return self._src_lang
@src_lang.setter
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> None:
'''simple docstring'''
lowercase_ : int = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ,__UpperCamelCase = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCamelCase ,token_ids_a=__UpperCamelCase ,already_has_special_tokens=__UpperCamelCase )
lowercase_ : int = [1] * len(self.prefix_tokens )
lowercase_ : Union[str, Any] = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(__UpperCamelCase )) + suffix_ones
return prefix_ones + ([0] * len(__UpperCamelCase )) + ([0] * len(__UpperCamelCase )) + suffix_ones
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> List[int]:
'''simple docstring'''
lowercase_ : int = [self.sep_token_id]
lowercase_ : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ) -> str:
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
lowercase_ : Any = src_lang
lowercase_ : Union[str, Any] = self(__UpperCamelCase ,add_special_tokens=__UpperCamelCase ,return_tensors=__UpperCamelCase ,**__UpperCamelCase )
lowercase_ : Any = self.convert_tokens_to_ids(__UpperCamelCase )
lowercase_ : int = tgt_lang_id
return inputs
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ : Optional[int] = {self.convert_ids_to_tokens(__UpperCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(__UpperCamelCase ,out_type=__UpperCamelCase )
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> int:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowercase_ : int = self.sp_model.PieceToId(__UpperCamelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
lowercase_ : Any = ''.join(__UpperCamelCase ).replace(__UpperCamelCase ,' ' ).strip()
return out_string
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(__UpperCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowercase_ : Optional[Any] = os.path.join(
__UpperCamelCase ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,__UpperCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCamelCase ,'wb' ) as fi:
lowercase_ : List[str] = self.sp_model.serialized_model_proto()
fi.write(__UpperCamelCase )
return (out_vocab_file,)
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = "eng_Latn" ,__UpperCamelCase = None ,__UpperCamelCase = "fra_Latn" ,**__UpperCamelCase ,) -> BatchEncoding:
'''simple docstring'''
lowercase_ : Dict = src_lang
lowercase_ : Optional[Any] = tgt_lang
return super().prepare_seqaseq_batch(__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase )
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> None:
'''simple docstring'''
lowercase_ : Any = self.lang_code_to_id[src_lang]
if self.legacy_behaviour:
lowercase_ : int = []
lowercase_ : Any = [self.eos_token_id, self.cur_lang_code]
else:
lowercase_ : int = [self.cur_lang_code]
lowercase_ : str = [self.eos_token_id]
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> None:
'''simple docstring'''
lowercase_ : List[str] = self.lang_code_to_id[lang]
if self.legacy_behaviour:
lowercase_ : Optional[int] = []
lowercase_ : Optional[int] = [self.eos_token_id, self.cur_lang_code]
else:
lowercase_ : Tuple = [self.cur_lang_code]
lowercase_ : str = [self.eos_token_id]
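# Hedged usage sketch for the tokenizer above. The checkpoint name comes from
# the pretrained map at the top of the file; the behaviour follows the
# set_src_lang_special_tokens logic above:
#
# tok = UpperCamelCase.from_pretrained("facebook/nllb-200-distilled-600M", src_lang="eng_Latn")
# batch = tok("Hello world", return_tensors="pt")  # wrapped with the eng_Latn code and </s>
# tok.src_lang = "fra_Latn"  # the property setter swaps the language special tokens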
| 321
|
"""simple docstring"""
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 321
| 1
|
"""simple docstring"""
def lowercase__( num_a : int , num_b : int ):
    return num_a ^ num_b < 0
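# Worked example: XOR propagates the sign bit, so the result is negative
# exactly when the two operands have different signs.
assert lowercase__(-5 ,3 ) is True
assert lowercase__(5 ,3 ) is False
assert lowercase__(-5 ,-3 ) is False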
if __name__ == "__main__":
import doctest
doctest.testmod()
| 321
|
"""simple docstring"""
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class UpperCamelCase :
def __init__( self ,__UpperCamelCase ,__UpperCamelCase=13 ,__UpperCamelCase=7 ,__UpperCamelCase=False ,__UpperCamelCase=True ,__UpperCamelCase=False ,__UpperCamelCase=True ,__UpperCamelCase=33 ,__UpperCamelCase=32 ,__UpperCamelCase=5 ,__UpperCamelCase=4 ,__UpperCamelCase=37 ,__UpperCamelCase="gelu" ,__UpperCamelCase=0.1 ,__UpperCamelCase=0.1 ,__UpperCamelCase=512 ,__UpperCamelCase=16 ,__UpperCamelCase=2 ,__UpperCamelCase=0.02 ,__UpperCamelCase=3 ,__UpperCamelCase=4 ,__UpperCamelCase=None ,) -> List[Any]:
'''simple docstring'''
lowercase_ : Any = parent
lowercase_ : str = batch_size
lowercase_ : List[Any] = seq_length
lowercase_ : Dict = is_training
lowercase_ : Tuple = use_input_mask
lowercase_ : Optional[Any] = use_token_type_ids
lowercase_ : List[str] = use_labels
lowercase_ : Any = vocab_size
lowercase_ : List[str] = hidden_size
lowercase_ : Optional[int] = num_hidden_layers
lowercase_ : int = num_attention_heads
lowercase_ : int = intermediate_size
lowercase_ : List[Any] = hidden_act
lowercase_ : Optional[int] = hidden_dropout_prob
lowercase_ : Tuple = attention_probs_dropout_prob
lowercase_ : Tuple = max_position_embeddings
lowercase_ : Optional[int] = type_vocab_size
lowercase_ : Optional[int] = type_sequence_label_size
lowercase_ : Dict = initializer_range
lowercase_ : int = num_labels
lowercase_ : Any = num_choices
lowercase_ : int = scope
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
lowercase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
lowercase_ : Dict = None
if self.use_input_mask:
lowercase_ : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowercase_ : Tuple = None
lowercase_ : Tuple = None
lowercase_ : Tuple = None
if self.use_labels:
lowercase_ : List[Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
lowercase_ : str = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
lowercase_ : int = ids_tensor([self.batch_size] ,self.num_choices )
lowercase_ : str = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
return EsmConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,pad_token_id=1 ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,)
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Tuple:
'''simple docstring'''
lowercase_ : List[Any] = EsmModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowercase_ : Tuple = model(__UpperCamelCase ,attention_mask=__UpperCamelCase )
lowercase_ : Union[str, Any] = model(__UpperCamelCase )
lowercase_ : int = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : Dict = EsmForMaskedLM(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowercase_ : int = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ,labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : str = self.num_labels
lowercase_ : int = EsmForTokenClassification(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowercase_ : List[Any] = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ,labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
lowercase_ : Any = self.prepare_config_and_inputs()
        lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ : Optional[int] = config_and_inputs
lowercase_ : Dict = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase ( lowercase_ , lowercase_ , unittest.TestCase ):
lowercase = False
lowercase = (
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
lowercase = ()
lowercase = (
{
'feature-extraction': EsmModel,
'fill-mask': EsmForMaskedLM,
'text-classification': EsmForSequenceClassification,
'token-classification': EsmForTokenClassification,
'zero-shot': EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase = True
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
lowercase_ : Dict = EsmModelTester(self )
lowercase_ : List[Any] = ConfigTester(self ,config_class=__UpperCamelCase ,hidden_size=37 )
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowercase_ : Optional[Any] = type
self.model_tester.create_and_check_model(*__UpperCamelCase )
def _UpperCAmelCase ( self ) -> int:
'''simple docstring'''
lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__UpperCamelCase )
def _UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__UpperCamelCase )
@slow
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ : List[str] = EsmModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def _UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()[0]
lowercase_ : str = EsmEmbeddings(config=__UpperCamelCase )
lowercase_ : Tuple = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
lowercase_ : List[Any] = torch.as_tensor(
[
[
0 + model.padding_idx + 1,
1 + model.padding_idx + 1,
2 + model.padding_idx + 1,
model.padding_idx,
]
] )
lowercase_ : Tuple = create_position_ids_from_input_ids(__UpperCamelCase ,model.padding_idx )
self.assertEqual(position_ids.shape ,expected_positions.shape )
self.assertTrue(torch.all(torch.eq(__UpperCamelCase ,__UpperCamelCase ) ) )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs()[0]
lowercase_ : List[Any] = EsmEmbeddings(config=__UpperCamelCase )
lowercase_ : List[Any] = torch.empty(2 ,4 ,30 )
lowercase_ : List[str] = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
lowercase_ : List[str] = torch.as_tensor([expected_single_positions, expected_single_positions] )
lowercase_ : List[str] = embeddings.create_position_ids_from_inputs_embeds(__UpperCamelCase )
self.assertEqual(position_ids.shape ,expected_positions.shape )
self.assertTrue(torch.all(torch.eq(__UpperCamelCase ,__UpperCamelCase ) ) )
@unittest.skip('Esm does not support embedding resizing' )
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
pass
@unittest.skip('Esm does not support embedding resizing' )
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
pass
@require_torch
class UpperCamelCase ( lowercase_ ):
@slow
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
with torch.no_grad():
lowercase_ : Any = EsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' )
model.eval()
lowercase_ : List[Any] = torch.tensor([[0, 1, 2, 3, 4, 5]] )
lowercase_ : List[str] = model(__UpperCamelCase )[0]
lowercase_ : Optional[int] = 33
lowercase_ : Union[str, Any] = torch.Size((1, 6, vocab_size) )
self.assertEqual(output.shape ,__UpperCamelCase )
lowercase_ : List[str] = torch.tensor(
[[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) )
@slow
def _UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
with torch.no_grad():
lowercase_ : int = EsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' )
model.eval()
lowercase_ : Tuple = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
lowercase_ : Dict = model(__UpperCamelCase )[0]
# compare the actual values for a slice.
lowercase_ : Any = torch.tensor(
[[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) )
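# A stand-alone re-implementation of the position-id logic the embedding tests
# above exercise; a sketch mirroring create_position_ids_from_input_ids, not
# the library function itself:
if is_torch_available():

    def _reference_position_ids(input_ids ,padding_idx ):
        # non-pad tokens count up from padding_idx + 1; pads keep padding_idx
        mask = input_ids.ne(padding_idx ).int()
        return torch.cumsum(mask ,dim=1 ) * mask + padding_idx
    # e.g. padding_idx=1 maps [[12, 31, 13, 1]] to [[2, 3, 4, 1]]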
| 321
| 1
|
"""simple docstring"""
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE ={
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/config.json''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/config.json''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/config.json''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/config.json''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/config.json''',
}
class UpperCamelCase ( SCREAMING_SNAKE_CASE_ ):
lowercase = 't5'
lowercase = ['past_key_values']
lowercase = {'hidden_size': 'd_model', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}
def __init__( self ,__UpperCamelCase=3_2128 ,__UpperCamelCase=512 ,__UpperCamelCase=64 ,__UpperCamelCase=2048 ,__UpperCamelCase=6 ,__UpperCamelCase=None ,__UpperCamelCase=8 ,__UpperCamelCase=32 ,__UpperCamelCase=128 ,__UpperCamelCase=0.1 ,__UpperCamelCase=1e-6 ,__UpperCamelCase=1.0 ,__UpperCamelCase="relu" ,__UpperCamelCase=True ,__UpperCamelCase=True ,__UpperCamelCase=0 ,__UpperCamelCase=1 ,**__UpperCamelCase ,) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : int = vocab_size
lowercase_ : str = d_model
lowercase_ : str = d_kv
lowercase_ : List[Any] = d_ff
lowercase_ : List[str] = num_layers
lowercase_ : Tuple = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
lowercase_ : Union[str, Any] = num_heads
lowercase_ : Tuple = relative_attention_num_buckets
lowercase_ : Optional[int] = relative_attention_max_distance
lowercase_ : Optional[Any] = dropout_rate
lowercase_ : str = layer_norm_epsilon
lowercase_ : List[str] = initializer_factor
lowercase_ : int = feed_forward_proj
lowercase_ : Optional[Any] = use_cache
lowercase_ : Optional[Any] = self.feed_forward_proj.split('-' )
lowercase_ : Dict = act_info[-1]
lowercase_ : List[str] = act_info[0] == 'gated'
        if len(act_info ) > 1 and act_info[0] != "gated" or len(act_info ) > 2:
raise ValueError(
f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
'\'gated-gelu\' or \'relu\'' )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
lowercase_ : Dict = 'gelu_new'
super().__init__(
            pad_token_id=__UpperCamelCase ,eos_token_id=__UpperCamelCase ,is_encoder_decoder=__UpperCamelCase ,**__UpperCamelCase ,)
class UpperCamelCase ( SCREAMING_SNAKE_CASE_ ):
@property
def _UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
lowercase_ : Union[str, Any] = {
'input_ids': {0: 'batch', 1: 'encoder_sequence'},
'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
}
if self.use_past:
lowercase_ : Tuple = 'past_encoder_sequence + sequence'
lowercase_ : Dict = {0: 'batch'}
lowercase_ : Dict = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
lowercase_ : Tuple = {0: 'batch', 1: 'decoder_sequence'}
lowercase_ : int = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
            self.fill_with_past_key_values_(common_inputs ,direction='inputs' )
return common_inputs
@property
def _UpperCAmelCase ( self ) -> int:
'''simple docstring'''
return 13
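# Stand-alone illustration of the activation parsing done in the config's
# __init__ above: "gated-gelu" yields a gated block whose activation is
# renamed to "gelu_new" for backwards compatibility. The demo_* names are
# introduced only for this example.
demo_proj = 'gated-gelu'
demo_act_info = demo_proj.split('-' )
demo_act_fn = demo_act_info[-1]  # 'gelu'
demo_is_gated = demo_act_info[0] == 'gated'  # True
if demo_proj == 'gated-gelu':
    demo_act_fn = 'gelu_new'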
| 350
|
"""simple docstring"""
import pickle
import numpy as np
from matplotlib import pyplot as plt
class UpperCamelCase :
def __init__( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=0.2 ,__UpperCamelCase=0.2 ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : Optional[int] = bp_numa
lowercase_ : Dict = bp_numa
lowercase_ : Tuple = bp_numa
lowercase_ : List[Any] = conva_get[:2]
lowercase_ : int = conva_get[2]
lowercase_ : Dict = size_pa
lowercase_ : int = rate_w
lowercase_ : Union[str, Any] = rate_t
lowercase_ : Dict = [
np.mat(-1 * np.random.rand(self.conva[0] ,self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
lowercase_ : Union[str, Any] = np.mat(-1 * np.random.rand(self.num_bpa ,self.num_bpa ) + 0.5 )
lowercase_ : Union[str, Any] = np.mat(-1 * np.random.rand(self.num_bpa ,self.num_bpa ) + 0.5 )
lowercase_ : str = -2 * np.random.rand(self.conva[1] ) + 1
lowercase_ : Tuple = -2 * np.random.rand(self.num_bpa ) + 1
lowercase_ : Union[str, Any] = -2 * np.random.rand(self.num_bpa ) + 1
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
lowercase_ : int = {
'num_bp1': self.num_bpa,
'num_bp2': self.num_bpa,
'num_bp3': self.num_bpa,
'conv1': self.conva,
'step_conv1': self.step_conva,
'size_pooling1': self.size_poolinga,
'rate_weight': self.rate_weight,
'rate_thre': self.rate_thre,
'w_conv1': self.w_conva,
'wkj': self.wkj,
'vji': self.vji,
'thre_conv1': self.thre_conva,
'thre_bp2': self.thre_bpa,
'thre_bp3': self.thre_bpa,
}
with open(__UpperCamelCase ,'wb' ) as f:
pickle.dump(__UpperCamelCase ,__UpperCamelCase )
print(f'''Model saved: {save_path}''' )
@classmethod
def _UpperCAmelCase ( cls ,__UpperCamelCase ) -> List[Any]:
'''simple docstring'''
with open(__UpperCamelCase ,'rb' ) as f:
lowercase_ : Any = pickle.load(__UpperCamelCase ) # noqa: S301
lowercase_ : str = model_dic.get('conv1' )
conv_get.append(model_dic.get('step_conv1' ) )
lowercase_ : Union[str, Any] = model_dic.get('size_pooling1' )
lowercase_ : Optional[Any] = model_dic.get('num_bp1' )
lowercase_ : str = model_dic.get('num_bp2' )
lowercase_ : Optional[Any] = model_dic.get('num_bp3' )
lowercase_ : Union[str, Any] = model_dic.get('rate_weight' )
lowercase_ : Optional[int] = model_dic.get('rate_thre' )
# create model instance
lowercase_ : Any = CNN(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
# modify model parameter
lowercase_ : Optional[Any] = model_dic.get('w_conv1' )
lowercase_ : Tuple = model_dic.get('wkj' )
lowercase_ : Union[str, Any] = model_dic.get('vji' )
lowercase_ : Optional[Any] = model_dic.get('thre_conv1' )
lowercase_ : Dict = model_dic.get('thre_bp2' )
lowercase_ : Optional[int] = model_dic.get('thre_bp3' )
return conv_ins
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Any:
'''simple docstring'''
return 1 / (1 + np.exp(-1 * x ))
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
return round(__UpperCamelCase ,3 )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Tuple:
'''simple docstring'''
lowercase_ : Dict = convs[0]
lowercase_ : Any = convs[1]
lowercase_ : Optional[Any] = np.shape(__UpperCamelCase )[0]
# get the data slice of original image data, data_focus
lowercase_ : Tuple = []
for i_focus in range(0 ,size_data - size_conv + 1 ,__UpperCamelCase ):
for j_focus in range(0 ,size_data - size_conv + 1 ,__UpperCamelCase ):
lowercase_ : List[Any] = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(__UpperCamelCase )
        # calculate the feature map of every single kernel, and save it as a list of matrices
lowercase_ : Dict = []
lowercase_ : Dict = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(__UpperCamelCase ):
lowercase_ : Tuple = []
for i_focus in range(len(__UpperCamelCase ) ):
lowercase_ : Optional[int] = (
np.sum(np.multiply(data_focus[i_focus] ,w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(__UpperCamelCase ) )
lowercase_ : Optional[int] = np.asmatrix(__UpperCamelCase ).reshape(
__UpperCamelCase ,__UpperCamelCase )
data_featuremap.append(__UpperCamelCase )
        # expand the data slice to one dimension
lowercase_ : Optional[int] = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(__UpperCamelCase ) )
lowercase_ : str = np.asarray(__UpperCamelCase )
return focus_list, data_featuremap
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase="average_pool" ) -> Tuple:
'''simple docstring'''
lowercase_ : Union[str, Any] = len(featuremaps[0] )
lowercase_ : str = int(size_map / size_pooling )
lowercase_ : Optional[int] = []
for i_map in range(len(__UpperCamelCase ) ):
lowercase_ : int = featuremaps[i_map]
lowercase_ : List[str] = []
for i_focus in range(0 ,__UpperCamelCase ,__UpperCamelCase ):
for j_focus in range(0 ,__UpperCamelCase ,__UpperCamelCase ):
lowercase_ : List[str] = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(__UpperCamelCase ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(__UpperCamelCase ) )
lowercase_ : Dict = np.asmatrix(__UpperCamelCase ).reshape(__UpperCamelCase ,__UpperCamelCase )
featuremap_pooled.append(__UpperCamelCase )
return featuremap_pooled
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Any:
'''simple docstring'''
lowercase_ : Tuple = []
for i in range(len(__UpperCamelCase ) ):
lowercase_ : Optional[Any] = np.shape(data[i] )
lowercase_ : List[str] = data[i].reshape(1 ,shapes[0] * shapes[1] )
lowercase_ : List[str] = data_listed.getA().tolist()[0]
data_expanded.extend(__UpperCamelCase )
lowercase_ : int = np.asarray(__UpperCamelCase )
return data_expanded
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> int:
'''simple docstring'''
lowercase_ : Any = np.asarray(__UpperCamelCase )
lowercase_ : Any = np.shape(__UpperCamelCase )
lowercase_ : Optional[Any] = data_mat.reshape(1 ,shapes[0] * shapes[1] )
return data_expanded
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> str:
'''simple docstring'''
lowercase_ : Any = []
lowercase_ : List[Any] = 0
for i_map in range(__UpperCamelCase ):
lowercase_ : List[str] = np.ones((size_map, size_map) )
for i in range(0 ,__UpperCamelCase ,__UpperCamelCase ):
for j in range(0 ,__UpperCamelCase ,__UpperCamelCase ):
lowercase_ : List[Any] = pd_pool[
i_pool
]
lowercase_ : Any = i_pool + 1
lowercase_ : Optional[int] = np.multiply(
__UpperCamelCase ,np.multiply(out_map[i_map] ,(1 - out_map[i_map]) ) )
pd_all.append(__UpperCamelCase )
return pd_all
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=bool ) -> Optional[int]:
'''simple docstring'''
print('----------------------Start Training-------------------------' )
print((' - - Shape: Train_Data ', np.shape(__UpperCamelCase )) )
print((' - - Shape: Teach_Data ', np.shape(__UpperCamelCase )) )
lowercase_ : int = 0
lowercase_ : Tuple = []
lowercase_ : Tuple = 1_0000
while rp < n_repeat and mse >= error_accuracy:
lowercase_ : List[str] = 0
print(f'''-------------Learning Time {rp}--------------''' )
for p in range(len(__UpperCamelCase ) ):
# print('------------Learning Image: %d--------------'%p)
lowercase_ : int = np.asmatrix(datas_train[p] )
lowercase_ : Any = np.asarray(datas_teach[p] )
lowercase_ , lowercase_ : Tuple = self.convolute(
__UpperCamelCase ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,)
lowercase_ : Any = self.pooling(__UpperCamelCase ,self.size_poolinga )
lowercase_ : Optional[int] = np.shape(__UpperCamelCase )
lowercase_ : Optional[int] = self._expand(__UpperCamelCase )
lowercase_ : int = data_bp_input
lowercase_ : Tuple = np.dot(__UpperCamelCase ,self.vji.T ) - self.thre_bpa
lowercase_ : Dict = self.sig(__UpperCamelCase )
lowercase_ : int = np.dot(__UpperCamelCase ,self.wkj.T ) - self.thre_bpa
lowercase_ : int = self.sig(__UpperCamelCase )
# --------------Model Leaning ------------------------
# calculate error and gradient---------------
lowercase_ : str = np.multiply(
(data_teach - bp_outa) ,np.multiply(__UpperCamelCase ,(1 - bp_outa) ) )
lowercase_ : Optional[int] = np.multiply(
np.dot(__UpperCamelCase ,self.wkj ) ,np.multiply(__UpperCamelCase ,(1 - bp_outa) ) )
lowercase_ : Any = np.dot(__UpperCamelCase ,self.vji )
lowercase_ : str = pd_i_all / (self.size_poolinga * self.size_poolinga)
lowercase_ : Dict = pd_conva_pooled.T.getA().tolist()
lowercase_ : List[Any] = self._calculate_gradient_from_pool(
__UpperCamelCase ,__UpperCamelCase ,shape_featuremapa[0] ,shape_featuremapa[1] ,self.size_poolinga ,)
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
lowercase_ : Optional[Any] = self._expand_mat(pd_conva_all[k_conv] )
lowercase_ : Dict = self.rate_weight * np.dot(__UpperCamelCase ,__UpperCamelCase )
lowercase_ : List[Any] = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
lowercase_ : Dict = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
lowercase_ : Optional[int] = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
lowercase_ : Any = self.vji + pd_j_all.T * bp_outa * self.rate_weight
lowercase_ : str = self.thre_bpa - pd_k_all * self.rate_thre
lowercase_ : Any = self.thre_bpa - pd_j_all * self.rate_thre
# calculate the sum error of all single image
lowercase_ : List[Any] = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
lowercase_ : int = rp + 1
lowercase_ : Union[str, Any] = error_count / patterns
all_mse.append(__UpperCamelCase )
def draw_error():
lowercase_ : str = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(__UpperCamelCase ,'+-' )
plt.plot(__UpperCamelCase ,'r--' )
plt.xlabel('Learning Times' )
plt.ylabel('All_mse' )
plt.grid(__UpperCamelCase ,alpha=0.5 )
plt.show()
        print('------------------Training Completed---------------------' )
print((' - - Training epoch: ', rp, f''' - - Mse: {mse:.6f}''') )
if draw_e:
draw_error()
return mse
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
lowercase_ : Union[str, Any] = []
print('-------------------Start Testing-------------------------' )
print((' - - Shape: Test_Data ', np.shape(__UpperCamelCase )) )
for p in range(len(__UpperCamelCase ) ):
lowercase_ : List[Any] = np.asmatrix(datas_test[p] )
lowercase_ , lowercase_ : Optional[Any] = self.convolute(
__UpperCamelCase ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,)
lowercase_ : List[Any] = self.pooling(__UpperCamelCase ,self.size_poolinga )
lowercase_ : List[str] = self._expand(__UpperCamelCase )
lowercase_ : Any = data_bp_input
lowercase_ : Optional[Any] = bp_outa * self.vji.T - self.thre_bpa
lowercase_ : str = self.sig(__UpperCamelCase )
lowercase_ : List[str] = bp_outa * self.wkj.T - self.thre_bpa
lowercase_ : Optional[int] = self.sig(__UpperCamelCase )
produce_out.extend(bp_outa.getA().tolist() )
lowercase_ : List[str] = [list(map(self.do_round ,__UpperCamelCase ) ) for each in produce_out]
return np.asarray(__UpperCamelCase )
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[Any]:
'''simple docstring'''
lowercase_ : Optional[int] = np.asmatrix(__UpperCamelCase )
lowercase_ , lowercase_ : Union[str, Any] = self.convolute(
__UpperCamelCase ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,)
lowercase_ : Optional[int] = self.pooling(__UpperCamelCase ,self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
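    # Stand-alone illustration of the average pooling done by the pooling
    # method above, on a made-up 4x4 feature map with a 2x2 window:
    demo_map = np.arange(16 ).reshape(4 ,4 )
    demo_pooled = np.asmatrix(
        [
            [np.average(demo_map[i : i + 2, j : j + 2] ) for j in range(0 ,4 ,2 )]
            for i in range(0 ,4 ,2 )
        ] )
    print(demo_pooled )  # [[ 2.5  4.5] [10.5 12.5]]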
| 321
| 0
|
"""simple docstring"""
from __future__ import annotations
from statistics import mean
def lowercase__( __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int ):
lowercase_ : Optional[Any] = [0] * no_of_processes
lowercase_ : str = [0] * no_of_processes
    # Initialize remaining_time to burst_time.
    for i in range(no_of_processes ):
lowercase_ : List[Any] = burst_time[i]
lowercase_ : list[int] = []
lowercase_ : Optional[int] = 0
lowercase_ : List[str] = 0
    # While processes remain uncompleted:
    # a process whose arrival time has passed and which still has remaining
    # execution time is put into ready_process.
    # The shortest process in ready_process, target_process, is executed.
while completed != no_of_processes:
lowercase_ : List[str] = []
lowercase_ : Optional[int] = -1
        for i in range(no_of_processes ):
if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i )
        if len(ready_process ) > 0:
lowercase_ : Any = ready_process[0]
for i in ready_process:
if remaining_time[i] < remaining_time[target_process]:
lowercase_ : Any = i
total_time += burst_time[target_process]
completed += 1
lowercase_ : Union[str, Any] = 0
lowercase_ : Optional[Any] = (
total_time - arrival_time[target_process] - burst_time[target_process]
)
else:
total_time += 1
return waiting_time
def lowercase__( __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] ):
lowercase_ : str = [0] * no_of_processes
    for i in range(no_of_processes ):
lowercase_ : List[Any] = burst_time[i] + waiting_time[i]
return turn_around_time
if __name__ == "__main__":
print("[TEST CASE 01]")
__SCREAMING_SNAKE_CASE =4
__SCREAMING_SNAKE_CASE =[2, 5, 3, 7]
__SCREAMING_SNAKE_CASE =[0, 0, 0, 0]
__SCREAMING_SNAKE_CASE =calculate_waitingtime(arrival_time, burst_time, no_of_processes)
__SCREAMING_SNAKE_CASE =calculate_turnaroundtime(
burst_time, no_of_processes, waiting_time
)
# Printing the Result
print("PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time")
for i, process_id in enumerate(list(range(1, 5))):
print(
F"{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"
F"{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"
)
print(F"\nAverage waiting time = {mean(waiting_time):.5f}")
print(F"Average turnaround time = {mean(turn_around_time):.5f}")
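    # Expected output for this test case, derived by hand (non-preemptive
    # shortest-job-first with all arrivals at t=0): execution order is
    # P1(2), P3(3), P2(5), P4(7), giving waiting times [0, 5, 2, 10]
    # (mean 4.25) and turnaround times [2, 10, 5, 17] (mean 8.50).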
| 351
|
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class UpperCamelCase ( unittest.TestCase ):
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]:
'''simple docstring'''
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['bs'] ,model_result['ss'] ):
lowercase_ : Dict = model_result['result'][batch_size][sequence_length]
self.assertIsNotNone(__UpperCamelCase )
def _UpperCAmelCase ( self ) -> int:
'''simple docstring'''
lowercase_ : int = 'sshleifer/tiny-gpt2'
lowercase_ : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,eager_mode=__UpperCamelCase ,multi_process=__UpperCamelCase ,)
lowercase_ : Union[str, Any] = TensorFlowBenchmark(__UpperCamelCase )
lowercase_ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
lowercase_ : List[str] = 'sgugger/tiny-distilbert-classification'
lowercase_ : Dict = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,only_pretrain_model=__UpperCamelCase ,)
lowercase_ : int = TensorFlowBenchmark(__UpperCamelCase )
lowercase_ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ : Any = 'sshleifer/tiny-gpt2'
lowercase_ : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,)
lowercase_ : Optional[Any] = TensorFlowBenchmark(__UpperCamelCase )
lowercase_ : int = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ : Dict = 'sshleifer/tiny-gpt2'
lowercase_ : Tuple = AutoConfig.from_pretrained(__UpperCamelCase )
lowercase_ : str = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,eager_mode=__UpperCamelCase ,multi_process=__UpperCamelCase ,)
lowercase_ : str = TensorFlowBenchmark(__UpperCamelCase ,[config] )
lowercase_ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
lowercase_ : Any = 'sshleifer/tiny-gpt2'
lowercase_ : Any = AutoConfig.from_pretrained(__UpperCamelCase )
lowercase_ : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,)
lowercase_ : int = TensorFlowBenchmark(__UpperCamelCase ,[config] )
lowercase_ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : int = 'sshleifer/tiny-gpt2'
lowercase_ : List[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,)
lowercase_ : List[str] = TensorFlowBenchmark(__UpperCamelCase )
lowercase_ : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
lowercase_ : List[str] = 'sshleifer/tiny-gpt2'
lowercase_ : Optional[int] = AutoConfig.from_pretrained(__UpperCamelCase )
lowercase_ : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,)
lowercase_ : str = TensorFlowBenchmark(__UpperCamelCase ,[config] )
lowercase_ : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
lowercase_ : str = 'patrickvonplaten/t5-tiny-random'
lowercase_ : int = AutoConfig.from_pretrained(__UpperCamelCase )
lowercase_ : Optional[int] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,)
lowercase_ : List[str] = TensorFlowBenchmark(__UpperCamelCase ,configs=[config] )
lowercase_ : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('GPU' ) ) == 0 ,'Cannot do xla on CPU.' )
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ : Optional[int] = 'sshleifer/tiny-gpt2'
lowercase_ : Union[str, Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,use_xla=__UpperCamelCase ,multi_process=__UpperCamelCase ,)
lowercase_ : Union[str, Any] = TensorFlowBenchmark(__UpperCamelCase )
lowercase_ : int = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
lowercase_ : List[str] = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase_ : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,inference=__UpperCamelCase ,save_to_csv=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,inference_time_csv_file=os.path.join(__UpperCamelCase ,'inf_time.csv' ) ,inference_memory_csv_file=os.path.join(__UpperCamelCase ,'inf_mem.csv' ) ,env_info_csv_file=os.path.join(__UpperCamelCase ,'env.csv' ) ,multi_process=__UpperCamelCase ,)
lowercase_ : List[str] = TensorFlowBenchmark(__UpperCamelCase )
benchmark.run()
self.assertTrue(Path(os.path.join(__UpperCamelCase ,'inf_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(__UpperCamelCase ,'inf_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(__UpperCamelCase ,'env.csv' ) ).exists() )
def _UpperCAmelCase ( self ) -> int:
'''simple docstring'''
lowercase_ : int = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(__UpperCamelCase ):
self.assertTrue(hasattr(__UpperCamelCase ,'sequential' ) )
self.assertTrue(hasattr(__UpperCamelCase ,'cumulative' ) )
self.assertTrue(hasattr(__UpperCamelCase ,'current' ) )
self.assertTrue(hasattr(__UpperCamelCase ,'total' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase_ : Dict = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,log_filename=os.path.join(__UpperCamelCase ,'log.txt' ) ,log_print=__UpperCamelCase ,trace_memory_line_by_line=__UpperCamelCase ,eager_mode=__UpperCamelCase ,multi_process=__UpperCamelCase ,)
lowercase_ : Dict = TensorFlowBenchmark(__UpperCamelCase )
lowercase_ : Any = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(__UpperCamelCase ,'log.txt' ) ).exists() )
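# Hedged usage sketch of the benchmark API exercised by the tests above (the
# model id, sizes and attribute path are taken from those tests; treat them
# as illustrative). A run returns a result object whose nested dicts are
# keyed by model id, then batch size, then sequence length:
# args = TensorFlowBenchmarkArguments(
#     models=["sshleifer/tiny-gpt2"], inference=True, training=False,
#     sequence_lengths=[8], batch_sizes=[1], multi_process=False,
# )
# results = TensorFlowBenchmark(args).run()
# results.time_inference_result["sshleifer/tiny-gpt2"]["result"][1][8]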
| 321
| 0
|
"""simple docstring"""
def lowercase__( __SCREAMING_SNAKE_CASE : Dict ):
"""simple docstring"""
return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
if __name__ == "__main__":
print("Program to check whether a number is a Perfect number or not...")
__SCREAMING_SNAKE_CASE =int(input("Enter number: ").strip())
print(F"{number} is {'' if perfect(number) else 'not '}a Perfect Number.")
| 352
|
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
class UpperCamelCase ( lowercase_ ):
lowercase = ['input_values', 'padding_mask']
def __init__( self ,__UpperCamelCase = 1 ,__UpperCamelCase = 2_4000 ,__UpperCamelCase = 0.0 ,__UpperCamelCase = None ,__UpperCamelCase = None ,**__UpperCamelCase ,) -> Any:
'''simple docstring'''
super().__init__(feature_size=__UpperCamelCase ,sampling_rate=__UpperCamelCase ,padding_value=__UpperCamelCase ,**__UpperCamelCase )
lowercase_ : List[str] = chunk_length_s
lowercase_ : Tuple = overlap
@property
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 ,int((1.0 - self.overlap) * self.chunk_length ) )
def __call__( self ,__UpperCamelCase ,__UpperCamelCase = None ,__UpperCamelCase = False ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,) -> BatchFeature:
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
f''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'''
f''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
if padding and truncation:
raise ValueError('Both padding and truncation were set. Make sure you only set one.' )
elif padding is None:
# by default let's pad the inputs
lowercase_ : Optional[int] = True
lowercase_ : Optional[int] = bool(
isinstance(__UpperCamelCase ,(list, tuple) ) and (isinstance(raw_audio[0] ,(np.ndarray, tuple, list) )) )
if is_batched:
lowercase_ : int = [np.asarray(__UpperCamelCase ,dtype=np.floataa ).T for audio in raw_audio]
elif not is_batched and not isinstance(__UpperCamelCase ,np.ndarray ):
lowercase_ : Any = np.asarray(__UpperCamelCase ,dtype=np.floataa )
elif isinstance(__UpperCamelCase ,np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ):
lowercase_ : List[str] = raw_audio.astype(np.floataa )
# always return batch
if not is_batched:
lowercase_ : Dict = [np.asarray(__UpperCamelCase ).T]
# verify inputs are valid
for idx, example in enumerate(__UpperCamelCase ):
if example.ndim > 2:
raise ValueError(f'''Expected input shape (channels, length) but got shape {example.shape}''' )
if self.feature_size == 1 and example.ndim != 1:
raise ValueError(f'''Expected mono audio but example has {example.shape[-1]} channels''' )
if self.feature_size == 2 and example.shape[-1] != 2:
raise ValueError(f'''Expected stereo audio but example has {example.shape[-1]} channels''' )
lowercase_ : Optional[int] = None
lowercase_ : List[Any] = BatchFeature({'input_values': raw_audio} )
if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
if truncation:
lowercase_ : List[Any] = min(array.shape[0] for array in raw_audio )
lowercase_ : int = int(np.floor(max_length / self.chunk_stride ) )
lowercase_ : Dict = (nb_step - 1) * self.chunk_stride + self.chunk_length
elif padding:
lowercase_ : List[Any] = max(array.shape[0] for array in raw_audio )
lowercase_ : Tuple = int(np.ceil(max_length / self.chunk_stride ) )
lowercase_ : List[str] = (nb_step - 1) * self.chunk_stride + self.chunk_length
lowercase_ : Union[str, Any] = 'max_length'
else:
lowercase_ : int = input_values
# normal padding on batch
if padded_inputs is None:
lowercase_ : int = self.pad(
__UpperCamelCase ,max_length=__UpperCamelCase ,truncation=__UpperCamelCase ,padding=__UpperCamelCase ,return_attention_mask=__UpperCamelCase ,)
if padding:
lowercase_ : Optional[int] = padded_inputs.pop('attention_mask' )
lowercase_ : Dict = []
for example in padded_inputs.pop('input_values' ):
if self.feature_size == 1:
lowercase_ : Optional[int] = example[..., None]
input_values.append(example.T )
lowercase_ : str = input_values
if return_tensors is not None:
lowercase_ : List[Any] = padded_inputs.convert_to_tensors(__UpperCamelCase )
return padded_inputs
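# Self-contained illustration of the chunking arithmetic implemented by the
# two properties above (the concrete values are assumptions for the sake of
# the example, not model defaults): with a 24 kHz rate, one-second chunks and
# 25% overlap, windows are 24000 samples long and advance by 18000 samples.
sampling_rate, chunk_length_s, overlap = 24_000, 1.0, 0.25
chunk_length = int(chunk_length_s * sampling_rate)          # 24000 samples
chunk_stride = max(1, int((1.0 - overlap) * chunk_length))  # 18000 samples
assert (chunk_length, chunk_stride) == (24_000, 18_000)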
| 321
| 0
|
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__) # pylint: disable=invalid-name
__SCREAMING_SNAKE_CASE ="\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to(\"cuda\")\n\n >>> prompt = \"A red cartoon frog, 4k\"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16\n ... )\n >>> pipe.to(\"cuda\")\n\n >>> init_image = load_image(\n ... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"\n ... \"/kandinsky/frog.png\"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save(\"red_frog.png\")\n ```\n"
def lowercase__( __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : List[str]=8 ):
lowercase_ : Tuple = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
lowercase_ : Optional[int] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
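# Worked example of the rounding above: the function appears to map a
# requested pixel size to the latent resolution, rounded up to a whole
# multiple of the scale factor. With height = width = 500 and
# scale_factor = 8, 500 // 64 == 7 with a remainder, so both dimensions
# round up to 8 and the function returns (64, 64).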
def lowercase__( __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : List[Any]=5_12 , __SCREAMING_SNAKE_CASE : Optional[int]=5_12 ):
lowercase_ : Union[str, Any] = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
lowercase_ : Tuple = np.array(pil_image.convert('RGB' ) )
lowercase_ : Tuple = arr.astype(np.floataa ) / 127.5 - 1
lowercase_ : str = np.transpose(__lowerCAmelCase , [2, 0, 1] )
lowercase_ : str = torch.from_numpy(__lowerCAmelCase ).unsqueeze(0 )
return image
class UpperCamelCase ( A__ ):
"""simple docstring"""
def __init__( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
self.register_modules(
unet=__snake_case ,scheduler=__snake_case ,movq=__snake_case ,)
lowercase_ : str = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Any:
'''simple docstring'''
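        # Example of the slice below: with num_inference_steps=100 and
        # strength=0.3, init_timestep is 30 and t_start is 70, so only the
        # final 30 scheduler timesteps run (standard img2img behaviour).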
lowercase_ : Union[str, Any] = min(int(num_inference_steps * strength ) ,__snake_case )
lowercase_ : Dict = max(num_inference_steps - init_timestep ,0 )
lowercase_ : Union[str, Any] = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=None ) -> List[Any]:
'''simple docstring'''
if not isinstance(__snake_case ,(torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f'''`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(__snake_case )}''' )
lowercase_ : Union[str, Any] = image.to(device=__snake_case ,dtype=__snake_case )
lowercase_ : str = batch_size * num_images_per_prompt
if image.shape[1] == 4:
lowercase_ : Dict = image
else:
if isinstance(__snake_case ,__snake_case ) and len(__snake_case ) != batch_size:
raise ValueError(
f'''You have passed a list of generators of length {len(__snake_case )}, but requested an effective batch'''
f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
elif isinstance(__snake_case ,__snake_case ):
lowercase_ : List[Any] = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(__snake_case )
]
lowercase_ : Tuple = torch.cat(__snake_case ,dim=0 )
else:
lowercase_ : str = self.movq.encode(__snake_case ).latent_dist.sample(__snake_case )
lowercase_ : int = self.movq.config.scaling_factor * init_latents
lowercase_ : Optional[int] = torch.cat([init_latents] ,dim=0 )
lowercase_ : List[str] = init_latents.shape
lowercase_ : List[str] = randn_tensor(__snake_case ,generator=__snake_case ,device=__snake_case ,dtype=__snake_case )
# get latents
lowercase_ : Tuple = self.scheduler.add_noise(__snake_case ,__snake_case ,__snake_case )
lowercase_ : str = init_latents
return latents
def _UpperCAmelCase ( self ,__UpperCamelCase=0 ) -> Optional[int]:
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
lowercase_ : str = torch.device(f'''cuda:{gpu_id}''' )
lowercase_ : Dict = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__snake_case ,__snake_case )
def _UpperCAmelCase ( self ,__UpperCamelCase=0 ) -> List[Any]:
'''simple docstring'''
if is_accelerate_available() and is_accelerate_version('>=' ,'0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
lowercase_ : Optional[Any] = torch.device(f'''cuda:{gpu_id}''' )
if self.device.type != "cpu":
self.to('cpu' ,silence_dtype_warnings=__snake_case )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowercase_ : int = None
for cpu_offloaded_model in [self.unet, self.movq]:
lowercase_ , lowercase_ : Any = cpu_offload_with_hook(__snake_case ,__snake_case ,prev_module_hook=__snake_case )
# We'll offload the last model manually.
lowercase_ : Optional[Any] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
if not hasattr(self.unet ,'_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(__snake_case ,'_hf_hook' )
and hasattr(module._hf_hook ,'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(__snake_case )
def __call__( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase = 512 ,__UpperCamelCase = 512 ,__UpperCamelCase = 100 ,__UpperCamelCase = 4.0 ,__UpperCamelCase = 0.3 ,__UpperCamelCase = 1 ,__UpperCamelCase = None ,__UpperCamelCase = "pil" ,__UpperCamelCase = True ,) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : Optional[Any] = self._execution_device
lowercase_ : Optional[Any] = guidance_scale > 1.0
if isinstance(__snake_case ,__snake_case ):
lowercase_ : Tuple = torch.cat(__snake_case ,dim=0 )
lowercase_ : int = image_embeds.shape[0]
if isinstance(__snake_case ,__snake_case ):
lowercase_ : Optional[Any] = torch.cat(__snake_case ,dim=0 )
if do_classifier_free_guidance:
lowercase_ : Any = image_embeds.repeat_interleave(__snake_case ,dim=0 )
lowercase_ : str = negative_image_embeds.repeat_interleave(__snake_case ,dim=0 )
lowercase_ : List[Any] = torch.cat([negative_image_embeds, image_embeds] ,dim=0 ).to(dtype=self.unet.dtype ,device=__snake_case )
if not isinstance(__snake_case ,__snake_case ):
lowercase_ : Optional[Any] = [image]
if not all(isinstance(__snake_case ,(PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
f'''Input is in incorrect format: {[type(__snake_case ) for i in image]}. Currently, we only support PIL image and pytorch tensor''' )
lowercase_ : Union[str, Any] = torch.cat([prepare_image(__snake_case ,__snake_case ,__snake_case ) for i in image] ,dim=0 )
lowercase_ : Union[str, Any] = image.to(dtype=image_embeds.dtype ,device=__snake_case )
lowercase_ : str = self.movq.encode(__snake_case )['latents']
lowercase_ : Union[str, Any] = latents.repeat_interleave(__snake_case ,dim=0 )
self.scheduler.set_timesteps(__snake_case ,device=__snake_case )
lowercase_ , lowercase_ : Union[str, Any] = self.get_timesteps(__snake_case ,__snake_case ,__snake_case )
lowercase_ : Tuple = timesteps[:1].repeat(batch_size * num_images_per_prompt )
lowercase_ , lowercase_ : Union[str, Any] = downscale_height_and_width(__snake_case ,__snake_case ,self.movq_scale_factor )
lowercase_ : List[str] = self.prepare_latents(
__snake_case ,__snake_case ,__snake_case ,__snake_case ,image_embeds.dtype ,__snake_case ,__snake_case )
for i, t in enumerate(self.progress_bar(__snake_case ) ):
# expand the latents if we are doing classifier free guidance
lowercase_ : str = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase_ : str = {'image_embeds': image_embeds}
lowercase_ : List[Any] = self.unet(
sample=__snake_case ,timestep=__snake_case ,encoder_hidden_states=__snake_case ,added_cond_kwargs=__snake_case ,return_dict=__snake_case ,)[0]
if do_classifier_free_guidance:
lowercase_ , lowercase_ : Dict = noise_pred.split(latents.shape[1] ,dim=1 )
lowercase_ , lowercase_ : str = noise_pred.chunk(2 )
lowercase_ , lowercase_ : str = variance_pred.chunk(2 )
lowercase_ : Union[str, Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
lowercase_ : Union[str, Any] = torch.cat([noise_pred, variance_pred_text] ,dim=1 )
if not (
hasattr(self.scheduler.config ,'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
lowercase_ , lowercase_ : int = noise_pred.split(latents.shape[1] ,dim=1 )
# compute the previous noisy sample x_t -> x_t-1
lowercase_ : int = self.scheduler.step(
__snake_case ,__snake_case ,__snake_case ,generator=__snake_case ,)[0]
# post-processing
lowercase_ : Optional[int] = self.movq.decode(__snake_case ,force_not_quantize=__snake_case )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' )
if output_type in ["np", "pil"]:
lowercase_ : str = image * 0.5 + 0.5
lowercase_ : Optional[Any] = image.clamp(0 ,1 )
lowercase_ : Union[str, Any] = image.cpu().permute(0 ,2 ,3 ,1 ).float().numpy()
if output_type == "pil":
lowercase_ : Union[str, Any] = self.numpy_to_pil(__snake_case )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__snake_case )
| 353
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__SCREAMING_SNAKE_CASE ={"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE =[
"MRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"MraForMaskedLM",
"MraForMultipleChoice",
"MraForQuestionAnswering",
"MraForSequenceClassification",
"MraForTokenClassification",
"MraLayer",
"MraModel",
"MraPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE =_LazyModule(__name__, globals()["__file__"], _import_structure)
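# Hedged illustration of the lazy-import pattern above (the module path is an
# assumption based on the file layout): nothing torch-dependent is imported
# until an attribute is first touched.
# from transformers.models import mra   # cheap: only the stub module loads
# mra.MraModel                          # first access triggers the real import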
| 321
| 0
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE ={
"Salesforce/blip-vqa-base": "https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json",
"Salesforce/blip-vqa-capfit-large": (
"https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json"
),
"Salesforce/blip-image-captioning-base": (
"https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json"
),
"Salesforce/blip-image-captioning-large": (
"https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json"
),
"Salesforce/blip-itm-base-coco": "https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json",
"Salesforce/blip-itm-large-coco": "https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json",
"Salesforce/blip-itm-base-flikr": "https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json",
"Salesforce/blip-itm-large-flikr": (
"https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json"
),
}
class UpperCamelCase ( a__ ):
lowercase = 'blip_text_model'
def __init__( self ,__UpperCamelCase=3_0524 ,__UpperCamelCase=768 ,__UpperCamelCase=768 ,__UpperCamelCase=3072 ,__UpperCamelCase=768 ,__UpperCamelCase=12 ,__UpperCamelCase=8 ,__UpperCamelCase=512 ,__UpperCamelCase="gelu" ,__UpperCamelCase=1e-12 ,__UpperCamelCase=0.0 ,__UpperCamelCase=0.0 ,__UpperCamelCase=0.02 ,__UpperCamelCase=3_0522 ,__UpperCamelCase=2 ,__UpperCamelCase=0 ,__UpperCamelCase=102 ,__UpperCamelCase=True ,__UpperCamelCase=True ,**__UpperCamelCase ,) -> Optional[int]:
'''simple docstring'''
super().__init__(
pad_token_id=_lowerCamelCase ,bos_token_id=_lowerCamelCase ,eos_token_id=_lowerCamelCase ,sep_token_id=_lowerCamelCase ,**_lowerCamelCase ,)
lowercase_ : List[Any] = vocab_size
lowercase_ : Tuple = hidden_size
lowercase_ : List[Any] = encoder_hidden_size
lowercase_ : Union[str, Any] = intermediate_size
lowercase_ : Tuple = projection_dim
lowercase_ : Union[str, Any] = hidden_dropout_prob
lowercase_ : str = num_hidden_layers
lowercase_ : List[Any] = num_attention_heads
lowercase_ : str = max_position_embeddings
lowercase_ : List[str] = layer_norm_eps
lowercase_ : str = hidden_act
lowercase_ : List[str] = initializer_range
lowercase_ : Union[str, Any] = attention_probs_dropout_prob
lowercase_ : List[Any] = is_decoder
lowercase_ : Union[str, Any] = use_cache
@classmethod
def _UpperCAmelCase ( cls ,__UpperCamelCase ,**__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
cls._set_token_in_kwargs(_lowerCamelCase )
        lowercase_ , lowercase_ : Dict = cls.get_config_dict(_lowerCamelCase ,**_lowerCamelCase )
# get the text config dict if we are loading from BlipConfig
if config_dict.get('model_type' ) == "blip":
lowercase_ : str = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls ,'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(_lowerCamelCase ,**_lowerCamelCase )
class UpperCamelCase ( a__ ):
lowercase = 'blip_vision_model'
def __init__( self ,__UpperCamelCase=768 ,__UpperCamelCase=3072 ,__UpperCamelCase=512 ,__UpperCamelCase=12 ,__UpperCamelCase=12 ,__UpperCamelCase=384 ,__UpperCamelCase=16 ,__UpperCamelCase="gelu" ,__UpperCamelCase=1e-5 ,__UpperCamelCase=0.0 ,__UpperCamelCase=1e-10 ,**__UpperCamelCase ,) -> str:
'''simple docstring'''
super().__init__(**_lowerCamelCase )
lowercase_ : Union[str, Any] = hidden_size
lowercase_ : int = intermediate_size
lowercase_ : List[Any] = projection_dim
lowercase_ : Dict = num_hidden_layers
lowercase_ : Union[str, Any] = num_attention_heads
lowercase_ : Dict = patch_size
lowercase_ : Dict = image_size
lowercase_ : List[Any] = initializer_range
lowercase_ : int = attention_dropout
lowercase_ : Optional[Any] = layer_norm_eps
lowercase_ : Optional[int] = hidden_act
@classmethod
def _UpperCAmelCase ( cls ,__UpperCamelCase ,**__UpperCamelCase ) -> Dict:
'''simple docstring'''
cls._set_token_in_kwargs(_lowerCamelCase )
        lowercase_ , lowercase_ : Dict = cls.get_config_dict(_lowerCamelCase ,**_lowerCamelCase )
# get the vision config dict if we are loading from BlipConfig
if config_dict.get('model_type' ) == "blip":
lowercase_ : Union[str, Any] = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls ,'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(_lowerCamelCase ,**_lowerCamelCase )
class UpperCamelCase ( a__ ):
lowercase = 'blip'
lowercase = True
def __init__( self ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase=512 ,__UpperCamelCase=2.6592 ,__UpperCamelCase=256 ,**__UpperCamelCase ,) -> int:
'''simple docstring'''
super().__init__(**_lowerCamelCase )
if text_config is None:
lowercase_ : Union[str, Any] = {}
logger.info('`text_config` is `None`. Initializing the `BlipTextConfig` with default values.' )
if vision_config is None:
lowercase_ : Optional[int] = {}
logger.info('`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.' )
lowercase_ : Union[str, Any] = BlipTextConfig(**_lowerCamelCase )
lowercase_ : Optional[int] = BlipVisionConfig(**_lowerCamelCase )
lowercase_ : List[str] = self.vision_config.hidden_size
lowercase_ : List[str] = projection_dim
lowercase_ : Union[str, Any] = logit_scale_init_value
lowercase_ : Tuple = 1.0
lowercase_ : Optional[Any] = 0.02
lowercase_ : Union[str, Any] = image_text_hidden_size
@classmethod
def _UpperCAmelCase ( cls ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ) -> Tuple:
'''simple docstring'''
return cls(text_config=text_config.to_dict() ,vision_config=vision_config.to_dict() ,**_lowerCamelCase )
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
lowercase_ : List[Any] = copy.deepcopy(self.__dict__ )
lowercase_ : Optional[int] = self.text_config.to_dict()
lowercase_ : str = self.vision_config.to_dict()
lowercase_ : Optional[Any] = self.__class__.model_type
return output
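# Hedged usage sketch of the three config classes above (hyper-parameter
# values are illustrative, and ``from_text_vision_configs`` is the usual
# transformers name for the composing classmethod defined above):
# text_cfg = BlipTextConfig(hidden_size=768, num_hidden_layers=12)
# vision_cfg = BlipVisionConfig(hidden_size=768, image_size=384)
# cfg = BlipConfig.from_text_vision_configs(text_cfg, vision_cfg)
# cfg.to_dict()["text_config"]["hidden_size"]  # 768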
| 354
|
"""simple docstring"""
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
__SCREAMING_SNAKE_CASE ="python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("importlib_metadata")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def lowercase__( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str=None ):
require_version(deps[pkg] , __SCREAMING_SNAKE_CASE )
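# Hedged illustration of what the loop above enforces (the spec string is an
# example, not the pinned value from dependency_versions_table): a
# requirement such as "tqdm>=4.27" is checked against the installed
# distribution, and an informative error is raised when it is not satisfied.
# require_version_core("tqdm>=4.27")  # raises if tqdm is missing or too old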
| 321
| 0
|
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE ={"tokenizer_file": "tokenizer.json"}
__SCREAMING_SNAKE_CASE ={
"tokenizer_file": {
"bigscience/tokenizer": "https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json",
"bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json",
"bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json",
"bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json",
"bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json",
"bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json",
"bigscience/bloom": "https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json",
},
}
class UpperCamelCase ( lowercase__ ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = ['''input_ids''', '''attention_mask''']
lowercase = None
def __init__( self ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase="<unk>" ,__UpperCamelCase="<s>" ,__UpperCamelCase="</s>" ,__UpperCamelCase="<pad>" ,__UpperCamelCase=False ,__UpperCamelCase=False ,**__UpperCamelCase ,) -> str:
'''simple docstring'''
super().__init__(
_a ,_a ,tokenizer_file=_a ,unk_token=_a ,bos_token=_a ,eos_token=_a ,pad_token=_a ,add_prefix_space=_a ,clean_up_tokenization_spaces=_a ,**_a ,)
lowercase_ : Union[str, Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' ,_a ) != add_prefix_space:
lowercase_ : List[Any] = getattr(_a ,pre_tok_state.pop('type' ) )
lowercase_ : Optional[int] = add_prefix_space
lowercase_ : Tuple = pre_tok_class(**_a )
lowercase_ : Optional[int] = add_prefix_space
def _UpperCAmelCase ( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> List[str]:
'''simple docstring'''
lowercase_ : Optional[int] = kwargs.get('is_split_into_words' ,_a )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'''
' pretokenized inputs.' )
return super()._batch_encode_plus(*_a ,**_a )
def _UpperCAmelCase ( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> List[Any]:
'''simple docstring'''
lowercase_ : List[Any] = kwargs.get('is_split_into_words' ,_a )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'''
' pretokenized inputs.' )
return super()._encode_plus(*_a ,**_a )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> List[Any]:
'''simple docstring'''
lowercase_ : Dict = self._tokenizer.model.save(_a ,name=_a )
return tuple(_a )
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> str:
'''simple docstring'''
lowercase_ : Any = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(_a ,add_special_tokens=_a ) + [self.eos_token_id] )
if len(_a ) > self.model_max_length:
lowercase_ : Any = input_ids[-self.model_max_length :]
return input_ids
| 355
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
def lowercase__( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Any=False , __SCREAMING_SNAKE_CASE : Dict=False ):
lowercase_ : int = 'backbone.' if is_semantic else ''
lowercase_ : List[str] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''{prefix}blocks.{i}.norm1.weight''', F'''beit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''{prefix}blocks.{i}.norm1.bias''', F'''beit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(F'''{prefix}blocks.{i}.attn.proj.weight''', F'''beit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(F'''{prefix}blocks.{i}.attn.proj.bias''', F'''beit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''{prefix}blocks.{i}.norm2.weight''', F'''beit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''{prefix}blocks.{i}.norm2.bias''', F'''beit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc1.weight''', F'''beit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc1.bias''', F'''beit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc2.weight''', F'''beit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc2.bias''', F'''beit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
(F'''{prefix}cls_token''', 'beit.embeddings.cls_token'),
(F'''{prefix}patch_embed.proj.weight''', 'beit.embeddings.patch_embeddings.projection.weight'),
(F'''{prefix}patch_embed.proj.bias''', 'beit.embeddings.patch_embeddings.projection.bias'),
(F'''{prefix}pos_embed''', 'beit.embeddings.position_embeddings'),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
('mask_token', 'beit.embeddings.mask_token'),
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
('fc_norm.weight', 'beit.pooler.layernorm.weight'),
('fc_norm.bias', 'beit.pooler.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[str]=False , __SCREAMING_SNAKE_CASE : List[Any]=False ):
for i in range(config.num_hidden_layers ):
lowercase_ : Any = 'backbone.' if is_semantic else ''
# queries, keys and values
lowercase_ : List[Any] = state_dict.pop(F'''{prefix}blocks.{i}.attn.qkv.weight''' )
lowercase_ : List[Any] = state_dict.pop(F'''{prefix}blocks.{i}.attn.q_bias''' )
lowercase_ : int = state_dict.pop(F'''{prefix}blocks.{i}.attn.v_bias''' )
lowercase_ : List[str] = in_proj_weight[
: config.hidden_size, :
]
lowercase_ : List[str] = q_bias
lowercase_ : List[str] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowercase_ : Any = in_proj_weight[
-config.hidden_size :, :
]
lowercase_ : Any = v_bias
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
lowercase_ : Any = state_dict.pop(F'''{prefix}blocks.{i}.gamma_1''' )
lowercase_ : int = state_dict.pop(F'''{prefix}blocks.{i}.gamma_2''' )
lowercase_ : Tuple = gamma_a
lowercase_ : List[Any] = gamma_a
def lowercase__( __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any ):
lowercase_ : List[Any] = dct.pop(__SCREAMING_SNAKE_CASE )
lowercase_ : Any = val
def lowercase__( ):
lowercase_ : List[str] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
lowercase_ : Any = Image.open(requests.get(__SCREAMING_SNAKE_CASE , stream=__SCREAMING_SNAKE_CASE ).raw )
return im
@torch.no_grad()
def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Any=False ):
lowercase_ : List[str] = False if 'rvlcdip' in checkpoint_url else True
lowercase_ : Dict = BeitConfig(use_absolute_position_embeddings=__SCREAMING_SNAKE_CASE , use_mask_token=__SCREAMING_SNAKE_CASE )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
lowercase_ : Any = 10_24
lowercase_ : List[str] = 40_96
lowercase_ : Tuple = 24
lowercase_ : Union[str, Any] = 16
# labels
if "rvlcdip" in checkpoint_url:
lowercase_ : Optional[Any] = 16
lowercase_ : Any = 'huggingface/label-files'
lowercase_ : int = 'rvlcdip-id2label.json'
lowercase_ : Optional[int] = json.load(open(hf_hub_download(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) )
lowercase_ : Dict = {int(__SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
lowercase_ : str = idalabel
lowercase_ : str = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
lowercase_ : Dict = torch.hub.load_state_dict_from_url(__SCREAMING_SNAKE_CASE , map_location='cpu' )['model']
lowercase_ : Optional[Any] = create_rename_keys(__SCREAMING_SNAKE_CASE , has_lm_head=__SCREAMING_SNAKE_CASE )
for src, dest in rename_keys:
rename_key(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
read_in_q_k_v(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , has_lm_head=__SCREAMING_SNAKE_CASE )
# load HuggingFace model
lowercase_ : Optional[int] = BeitForMaskedImageModeling(__SCREAMING_SNAKE_CASE ) if has_lm_head else BeitForImageClassification(__SCREAMING_SNAKE_CASE )
model.eval()
model.load_state_dict(__SCREAMING_SNAKE_CASE )
# Check outputs on an image
lowercase_ : List[Any] = BeitImageProcessor(
size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=__SCREAMING_SNAKE_CASE )
lowercase_ : str = prepare_img()
lowercase_ : Optional[Any] = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='pt' )
lowercase_ : int = encoding['pixel_values']
lowercase_ : Any = model(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = outputs.logits
# verify logits
lowercase_ : Optional[Any] = [1, 16] if 'rvlcdip' in checkpoint_url else [1, 1_96, 81_92]
assert logits.shape == torch.Size(__SCREAMING_SNAKE_CASE ), "Shape of logits not as expected"
Path(__SCREAMING_SNAKE_CASE ).mkdir(exist_ok=__SCREAMING_SNAKE_CASE )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(__SCREAMING_SNAKE_CASE )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__SCREAMING_SNAKE_CASE )
if push_to_hub:
if has_lm_head:
lowercase_ : List[str] = 'dit-base' if 'base' in checkpoint_url else 'dit-large'
else:
lowercase_ : List[str] = 'dit-base-finetuned-rvlcdip' if 'dit-b' in checkpoint_url else 'dit-large-finetuned-rvlcdip'
image_processor.push_to_hub(
repo_path_or_name=Path(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=__SCREAMING_SNAKE_CASE , )
model.push_to_hub(
repo_path_or_name=Path(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=__SCREAMING_SNAKE_CASE , )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
type=str,
help="URL to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
__SCREAMING_SNAKE_CASE =parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 321
| 0
|
"""simple docstring"""
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
__SCREAMING_SNAKE_CASE = datasets.load_iris()
__SCREAMING_SNAKE_CASE = np.array(data["data"])
__SCREAMING_SNAKE_CASE = np.array(data["target"])
__SCREAMING_SNAKE_CASE = data["""target_names"""]
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE = train_test_split(X, y)
def lowercase__( __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Tuple ):
return np.linalg.norm(np.array(__snake_case ) - np.array(__snake_case ) )
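# e.g. euclidean_distance([0, 0], [3, 4]) returns 5.0: the classifier below
# ranks every training point by this straight-line distance to the query.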
def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Tuple=5 ):
lowercase_ : Any = zip(__snake_case , __snake_case )
# List of distances of all points from the point to be classified
lowercase_ : int = []
for data_point in data:
lowercase_ : Optional[int] = euclidean_distance(data_point[0] , __snake_case )
distances.append((distance, data_point[1]) )
# Choosing 'k' points with the least distances.
lowercase_ : int = [i[1] for i in sorted(__snake_case )[:k]]
# Most commonly occurring class among them
# is the class into which the point is classified
lowercase_ : Dict = Counter(__snake_case ).most_common(1 )[0][0]
return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
| 356
|
"""simple docstring"""
__SCREAMING_SNAKE_CASE ={
"a": "AAAAA",
"b": "AAAAB",
"c": "AAABA",
"d": "AAABB",
"e": "AABAA",
"f": "AABAB",
"g": "AABBA",
"h": "AABBB",
"i": "ABAAA",
"j": "BBBAA",
"k": "ABAAB",
"l": "ABABA",
"m": "ABABB",
"n": "ABBAA",
"o": "ABBAB",
"p": "ABBBA",
"q": "ABBBB",
"r": "BAAAA",
"s": "BAAAB",
"t": "BAABA",
"u": "BAABB",
"v": "BBBAB",
"w": "BABAA",
"x": "BABAB",
"y": "BABBA",
"z": "BABBB",
" ": " ",
}
__SCREAMING_SNAKE_CASE ={value: key for key, value in encode_dict.items()}
def lowercase__( __SCREAMING_SNAKE_CASE : str ):
lowercase_ : Union[str, Any] = ''
for letter in word.lower():
if letter.isalpha() or letter == " ":
encoded += encode_dict[letter]
else:
raise Exception('encode() accepts only letters of the alphabet and spaces' )
return encoded
def lowercase__( __SCREAMING_SNAKE_CASE : str ):
if set(__SCREAMING_SNAKE_CASE ) - {"A", "B", " "} != set():
raise Exception('decode() accepts only \'A\', \'B\' and spaces' )
lowercase_ : Dict = ''
for word in coded.split():
while len(__SCREAMING_SNAKE_CASE ) != 0:
decoded += decode_dict[word[:5]]
lowercase_ : Any = word[5:]
decoded += " "
return decoded.strip()
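# Round-trip sketch using the table above (note this variant's non-standard
# codes for "j" and "v", which keep the mapping one-to-one):
# encode("hello") -> "AABBBAABAAABABAABABAABBAB"
# decode("AABBBAABAAABABAABABAABBAB") -> "hello"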
if __name__ == "__main__":
from doctest import testmod
testmod()
| 321
| 0
|
"""simple docstring"""
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
__SCREAMING_SNAKE_CASE =Mapping[str, np.ndarray]
__SCREAMING_SNAKE_CASE =Mapping[str, Any] # Is a nested dict.
__SCREAMING_SNAKE_CASE =0.01
@dataclasses.dataclass(frozen=lowercase_ )
class UpperCamelCase :
lowercase = 42 # [num_res, num_atom_type, 3]
# Amino-acid type for each residue represented as an integer between 0 and
# 20, where 20 is 'X'.
lowercase = 42 # [num_res]
# Binary float mask to indicate presence of a particular atom. 1.0 if an atom
# is present and 0.0 if not. This should be used for loss masking.
lowercase = 42 # [num_res, num_atom_type]
# Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
lowercase = 42 # [num_res]
# B-factors, or temperature factors, of each residue (in sq. angstroms units),
# representing the displacement of the residue from its ground truth mean
# value.
lowercase = 42 # [num_res, num_atom_type]
# Chain indices for multi-chain predictions
lowercase = None
# Optional remark about the protein. Included as a comment in output PDB
# files
lowercase = None
# Templates used to generate this protein (prediction-only)
lowercase = None
# Chain corresponding to each parent
lowercase = None
def lowercase__( __SCREAMING_SNAKE_CASE : str ):
lowercase_ : Optional[int] = R'(\[[A-Z]+\]\n)'
lowercase_ : Optional[Any] = [tag.strip() for tag in re.split(__A , __A ) if len(__A ) > 0]
lowercase_ : List[Any] = zip(tags[0::2] , [l.split('\n' ) for l in tags[1::2]] )
lowercase_ : Union[str, Any] = ['N', 'CA', 'C']
lowercase_ : Optional[int] = None
lowercase_ : Dict = None
lowercase_ : Optional[int] = None
for g in groups:
if "[PRIMARY]" == g[0]:
lowercase_ : Any = g[1][0].strip()
for i in range(len(__A ) ):
if seq[i] not in residue_constants.restypes:
lowercase_ : Optional[Any] = 'X' # FIXME: strings are immutable
lowercase_ : Union[str, Any] = np.array(
[residue_constants.restype_order.get(__A , residue_constants.restype_num ) for res_symbol in seq] )
elif "[TERTIARY]" == g[0]:
lowercase_ : Dict = []
for axis in range(3 ):
tertiary.append(list(map(__A , g[1][axis].split() ) ) )
lowercase_ : List[str] = np.array(__A )
lowercase_ : Optional[Any] = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa )
for i, atom in enumerate(__A ):
lowercase_ : Any = np.transpose(tertiary_np[:, i::3] )
atom_positions *= PICO_TO_ANGSTROM
elif "[MASK]" == g[0]:
lowercase_ : Optional[int] = np.array(list(map({'-': 0, '+': 1}.get , g[1][0].strip() ) ) )
lowercase_ : Dict = np.zeros(
(
len(__A ),
residue_constants.atom_type_num,
) ).astype(np.floataa )
for i, atom in enumerate(__A ):
lowercase_ : str = 1
atom_mask *= mask[..., None]
assert aatype is not None
return Protein(
atom_positions=__A , atom_mask=__A , aatype=__A , residue_index=np.arange(len(__A ) ) , b_factors=__A , )
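# Unit note for the parser above: ProteinNet stores tertiary coordinates in
# picometres, so scaling by PICO_TO_ANGSTROM (0.01, defined at module top)
# converts them to angstroms, e.g. a 154 pm carbon-carbon bond -> 1.54 A.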
def lowercase__( __SCREAMING_SNAKE_CASE : Protein , __SCREAMING_SNAKE_CASE : int = 0 ):
lowercase_ : int = []
lowercase_ : List[Any] = prot.remark
if remark is not None:
pdb_headers.append(F'''REMARK {remark}''' )
lowercase_ : Dict = prot.parents
lowercase_ : Any = prot.parents_chain_index
if parents is not None and parents_chain_index is not None:
lowercase_ : Union[str, Any] = [p for i, p in zip(__A , __A ) if i == chain_id]
if parents is None or len(__A ) == 0:
lowercase_ : Tuple = ['N/A']
pdb_headers.append(F'''PARENT {" ".join(__A )}''' )
return pdb_headers
def lowercase__( __SCREAMING_SNAKE_CASE : Protein , __SCREAMING_SNAKE_CASE : str ):
lowercase_ : Optional[Any] = []
lowercase_ : Optional[int] = pdb_str.split('\n' )
lowercase_ : Tuple = prot.remark
if remark is not None:
out_pdb_lines.append(F'''REMARK {remark}''' )
lowercase_ : Union[str, Any] = 42
if prot.parents is not None and len(prot.parents ) > 0:
lowercase_ : int = []
if prot.parents_chain_index is not None:
lowercase_ : Dict = {}
for p, i in zip(prot.parents , prot.parents_chain_index ):
parent_dict.setdefault(str(__A ) , [] )
parent_dict[str(__A )].append(__A )
lowercase_ : int = max([int(__A ) for chain_idx in parent_dict] )
for i in range(max_idx + 1 ):
lowercase_ : Optional[int] = parent_dict.get(str(__A ) , ['N/A'] )
parents_per_chain.append(__A )
else:
parents_per_chain.append(list(prot.parents ) )
else:
lowercase_ : Dict = [['N/A']]
def make_parent_line(__SCREAMING_SNAKE_CASE : Sequence[str] ) -> str:
return F'''PARENT {" ".join(__A )}'''
out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) )
lowercase_ : Tuple = 0
for i, l in enumerate(__A ):
if "PARENT" not in l and "REMARK" not in l:
out_pdb_lines.append(__A )
if "TER" in l and "END" not in lines[i + 1]:
chain_counter += 1
if not chain_counter >= len(__A ):
lowercase_ : List[Any] = parents_per_chain[chain_counter]
else:
lowercase_ : List[str] = ['N/A']
out_pdb_lines.append(make_parent_line(__A ) )
return "\n".join(__A )
def lowercase__( __SCREAMING_SNAKE_CASE : Protein ):
lowercase_ : Dict = residue_constants.restypes + ['X']
def res_atoa(__SCREAMING_SNAKE_CASE : int ) -> str:
return residue_constants.restype_atoa.get(restypes[r] , 'UNK' )
lowercase_ : int = residue_constants.atom_types
lowercase_ : Optional[int] = []
lowercase_ : Optional[int] = prot.atom_mask
lowercase_ : Tuple = prot.aatype
lowercase_ : str = prot.atom_positions
lowercase_ : Union[str, Any] = prot.residue_index.astype(np.intaa )
lowercase_ : Union[str, Any] = prot.b_factors
lowercase_ : Union[str, Any] = prot.chain_index
if np.any(aatype > residue_constants.restype_num ):
raise ValueError('Invalid aatypes.' )
lowercase_ : List[str] = get_pdb_headers(__A )
if len(__A ) > 0:
pdb_lines.extend(__A )
lowercase_ : str = aatype.shape[0]
lowercase_ : Union[str, Any] = 1
lowercase_ : Dict = 0
lowercase_ : Any = string.ascii_uppercase
lowercase_ : Dict = None
# Add all atom sites.
for i in range(__A ):
lowercase_ : List[str] = res_atoa(aatype[i] )
for atom_name, pos, mask, b_factor in zip(__A , atom_positions[i] , atom_mask[i] , b_factors[i] ):
if mask < 0.5:
continue
lowercase_ : List[Any] = 'ATOM'
lowercase_ : Tuple = atom_name if len(__A ) == 4 else F''' {atom_name}'''
lowercase_ : Optional[Any] = ''
lowercase_ : Any = ''
lowercase_ : Tuple = 1.00
lowercase_ : Optional[Any] = atom_name[0] # Protein supports only C, N, O, S, this works.
lowercase_ : List[str] = ''
lowercase_ : int = 'A'
if chain_index is not None:
lowercase_ : List[Any] = chain_tags[chain_index[i]]
# PDB is a columnar format, every space matters here!
lowercase_ : Optional[int] = (
F'''{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}'''
F'''{res_name_a:>3} {chain_tag:>1}'''
F'''{residue_index[i]:>4}{insertion_code:>1} '''
F'''{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}'''
F'''{occupancy:>6.2f}{b_factor:>6.2f} '''
F'''{element:>2}{charge:>2}'''
)
pdb_lines.append(__A )
atom_index += 1
lowercase_ : Any = i == n - 1
if chain_index is not None:
if i != n - 1 and chain_index[i + 1] != prev_chain_index:
lowercase_ : Tuple = True
lowercase_ : List[Any] = chain_index[i + 1]
if should_terminate:
# Close the chain.
lowercase_ : List[str] = 'TER'
lowercase_ : Any = (
F'''{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}'''
)
pdb_lines.append(__A )
atom_index += 1
if i != n - 1:
# "prev" is a misnomer here. This happens at the beginning of
# each new chain.
pdb_lines.extend(get_pdb_headers(__A , __A ) )
pdb_lines.append('END' )
pdb_lines.append('' )
return "\n".join(__A )
def lowercase__( __SCREAMING_SNAKE_CASE : Protein ):
return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def lowercase__( __SCREAMING_SNAKE_CASE : FeatureDict , __SCREAMING_SNAKE_CASE : ModelOutput , __SCREAMING_SNAKE_CASE : Optional[np.ndarray] = None , __SCREAMING_SNAKE_CASE : Optional[np.ndarray] = None , __SCREAMING_SNAKE_CASE : Optional[str] = None , __SCREAMING_SNAKE_CASE : Optional[Sequence[str]] = None , __SCREAMING_SNAKE_CASE : Optional[Sequence[int]] = None , ):
return Protein(
aatype=features['aatype'] , atom_positions=result['final_atom_positions'] , atom_mask=result['final_atom_mask'] , residue_index=features['residue_index'] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result['final_atom_mask'] ) , chain_index=__A , remark=__A , parents=__A , parents_chain_index=__A , )
| 357
|
"""simple docstring"""
def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int ):
def count_of_possible_combinations(__SCREAMING_SNAKE_CASE : int ) -> int:
if target < 0:
return 0
if target == 0:
return 1
return sum(count_of_possible_combinations(target - item ) for item in array )
return count_of_possible_combinations(__SCREAMING_SNAKE_CASE )
def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int ):
def count_of_possible_combinations_with_dp_array(
__SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] ) -> int:
if target < 0:
return 0
if target == 0:
return 1
if dp_array[target] != -1:
return dp_array[target]
lowercase_ : str = sum(
count_of_possible_combinations_with_dp_array(target - item , __SCREAMING_SNAKE_CASE )
for item in array )
lowercase_ : Tuple = answer
return answer
lowercase_ : Optional[Any] = [-1] * (target + 1)
return count_of_possible_combinations_with_dp_array(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int ):
lowercase_ : Dict = [0] * (target + 1)
lowercase_ : Dict = 1
for i in range(1 , target + 1 ):
for j in range(__SCREAMING_SNAKE_CASE ):
if i - array[j] >= 0:
dp_array[i] += dp_array[i - array[j]]
return dp_array[target]
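# Worked check of the bottom-up table above for array=[1, 2, 5] and
# target=5 (order matters, so this counts ordered sequences):
# dp_array grows as [1, 1, 2, 3, 5, 9], matching the 9 printed below.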
if __name__ == "__main__":
import doctest
doctest.testmod()
__SCREAMING_SNAKE_CASE =3
__SCREAMING_SNAKE_CASE =5
__SCREAMING_SNAKE_CASE =[1, 2, 5]
print(combination_sum_iv(n, array, target))
| 321
| 0
|
"""simple docstring"""
from __future__ import annotations
def lowercase__( __SCREAMING_SNAKE_CASE : str ):
return [ord(lowerCamelCase_ ) - 96 for elem in plain]
def lowercase__( __SCREAMING_SNAKE_CASE : list[int] ):
return "".join(chr(elem + 96 ) for elem in encoded )
def lowercase__( ):
lowercase_ : Optional[Any] = encode(input('-> ' ).strip().lower() )
print('Encoded: ' , lowerCamelCase_ )
print('Decoded:' , decode(lowerCamelCase_ ) )
if __name__ == "__main__":
main()
| 358
|
"""simple docstring"""
class UpperCamelCase :
def __init__( self ,__UpperCamelCase ) -> None:
'''simple docstring'''
lowercase_ : int = set_counts
lowercase_ : List[Any] = max(__UpperCamelCase )
lowercase_ : Union[str, Any] = len(__UpperCamelCase )
lowercase_ : Dict = [1] * num_sets
lowercase_ : Optional[int] = list(range(__UpperCamelCase ) )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> bool:
'''simple docstring'''
lowercase_ : Optional[int] = self.get_parent(__UpperCamelCase )
lowercase_ : int = self.get_parent(__UpperCamelCase )
if src_parent == dst_parent:
return False
if self.ranks[dst_parent] >= self.ranks[src_parent]:
self.set_counts[dst_parent] += self.set_counts[src_parent]
lowercase_ : Tuple = 0
lowercase_ : str = dst_parent
if self.ranks[dst_parent] == self.ranks[src_parent]:
self.ranks[dst_parent] += 1
lowercase_ : Union[str, Any] = self.set_counts[dst_parent]
else:
self.set_counts[src_parent] += self.set_counts[dst_parent]
lowercase_ : str = 0
lowercase_ : Tuple = src_parent
lowercase_ : int = self.set_counts[src_parent]
lowercase_ : str = max(self.max_set ,__UpperCamelCase )
return True
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> int:
'''simple docstring'''
if self.parents[disj_set] == disj_set:
return disj_set
lowercase_ : Union[str, Any] = self.get_parent(self.parents[disj_set] )
return self.parents[disj_set]
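# Hedged usage sketch of the union-by-rank structure above (the class and
# method names are obfuscated, so the calls below are descriptive rather
# than literal): starting from set_counts=[1, 1, 1], merging 0 with 1 and
# then 1 with 2 leaves a single root whose count is 3, so max_set == 3.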
| 321
| 0
|
"""simple docstring"""
from __future__ import annotations
import bisect
def bisect_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    """Leftmost insertion point that keeps sorted_collection sorted."""
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo


def bisect_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    """Rightmost insertion point that keeps sorted_collection sorted."""
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    return lo


def insort_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)


def insort_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)


def binary_search(sorted_collection: list[int], item: int) -> int | None:
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None


def binary_search_std_lib(sorted_collection: list[int], item: int) -> int | None:
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None


def binary_search_by_recursion(sorted_collection: list[int], item: int, left: int, right: int) -> int | None:
    if right < left:
        return None
    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)
if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = sorted(int(item) for item in user_input.split(","))
    target = int(input("Enter a single number to be found in the list:\n"))
    result = binary_search(collection, target)
    if result is None:
        print(f"{target} was not found in {collection}.")
    else:
        print(f"{target} was found at position {result} in {collection}.")
| 359
|
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}


class BlenderbotTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BlenderbotTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # The mask token behaves like a normal word, i.e. it includes the space before it
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        return token_ids_0 + [self.eos_token_id]

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
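# Minimal usage sketch (the checkpoint name comes from the maps above):
# tokenizer = BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B")
# tokenizer("Hello world")["input_ids"]  # ends with the EOS id, per build_inputs_with_special_tokens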
| 321
| 0
|
"""simple docstring"""
def heaps(arr: list) -> list:
    """Generate all permutations of arr with Heap's algorithm (mutates arr in place)."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return

        generate(k - 1, arr)

        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
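# Example: heaps([1, 2, 3]) returns all 3! = 6 permutations, in Heap's order:
# [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]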
| 360
|
"""simple docstring"""
import os
import sys
import unittest
__SCREAMING_SNAKE_CASE =os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
__SCREAMING_SNAKE_CASE =os.path.join("tests", "models", "bert", "test_modeling_bert.py")
__SCREAMING_SNAKE_CASE =os.path.join("tests", "models", "blip", "test_modeling_blip.py")
class UpperCamelCase ( unittest.TestCase ):
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
lowercase_ : Tuple = get_test_to_tester_mapping(__UpperCamelCase )
lowercase_ : Optional[int] = get_test_to_tester_mapping(__UpperCamelCase )
lowercase_ : List[str] = {'BertModelTest': 'BertModelTester'}
lowercase_ : Union[str, Any] = {
'BlipModelTest': 'BlipModelTester',
'BlipTextImageModelTest': 'BlipTextImageModelsModelTester',
'BlipTextModelTest': 'BlipTextModelTester',
'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester',
'BlipVQAModelTest': 'BlipVQAModelTester',
'BlipVisionModelTest': 'BlipVisionModelTester',
}
self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase )
self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase )
def _UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ : Optional[Any] = get_model_to_test_mapping(__UpperCamelCase )
lowercase_ : List[str] = get_model_to_test_mapping(__UpperCamelCase )
lowercase_ : Any = {
'BertForMaskedLM': ['BertModelTest'],
'BertForMultipleChoice': ['BertModelTest'],
'BertForNextSentencePrediction': ['BertModelTest'],
'BertForPreTraining': ['BertModelTest'],
'BertForQuestionAnswering': ['BertModelTest'],
'BertForSequenceClassification': ['BertModelTest'],
'BertForTokenClassification': ['BertModelTest'],
'BertLMHeadModel': ['BertModelTest'],
'BertModel': ['BertModelTest'],
}
lowercase_ : Any = {
'BlipForConditionalGeneration': ['BlipTextImageModelTest'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'],
'BlipForQuestionAnswering': ['BlipVQAModelTest'],
'BlipModel': ['BlipModelTest'],
'BlipTextModel': ['BlipTextModelTest'],
'BlipVisionModel': ['BlipVisionModelTest'],
}
self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase )
self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase )
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
lowercase_ : List[str] = get_model_to_tester_mapping(__UpperCamelCase )
lowercase_ : Dict = get_model_to_tester_mapping(__UpperCamelCase )
lowercase_ : Tuple = {
'BertForMaskedLM': ['BertModelTester'],
'BertForMultipleChoice': ['BertModelTester'],
'BertForNextSentencePrediction': ['BertModelTester'],
'BertForPreTraining': ['BertModelTester'],
'BertForQuestionAnswering': ['BertModelTester'],
'BertForSequenceClassification': ['BertModelTester'],
'BertForTokenClassification': ['BertModelTester'],
'BertLMHeadModel': ['BertModelTester'],
'BertModel': ['BertModelTester'],
}
lowercase_ : Optional[Any] = {
'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'],
'BlipForQuestionAnswering': ['BlipVQAModelTester'],
'BlipModel': ['BlipModelTester'],
'BlipTextModel': ['BlipTextModelTester'],
'BlipVisionModel': ['BlipVisionModelTester'],
}
self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase )
self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase )
| 321
| 0
|
"""simple docstring"""
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class TextInpainting(DiffusionPipeline):
    def __init__(
        self,
        segmentation_model: CLIPSegForImageSegmentation,
        segmentation_processor: CLIPSegProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
                "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)

        if hasattr(scheduler.config, "skip_prk_steps") and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} has not set the configuration"
                " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
                " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
                " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
                " Hub, it would be very nice if you could open a Pull request for the"
                " `scheduler/scheduler_config.json` file"
            )
            deprecate("skip_prk_steps not set", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["skip_prk_steps"] = True
            scheduler._internal_dict = FrozenDict(new_config)

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            segmentation_model=segmentation_model,
            segmentation_processor=segmentation_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    def enable_sequential_cpu_offload(self):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device("cuda")

        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image: Union[torch.FloatTensor, PIL.Image.Image],
        text: str,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        # Segment the region described by `text` with CLIPSeg to obtain a mask
        inputs = self.segmentation_processor(
            text=[text], images=[image], padding="max_length", return_tensors="pt"
        ).to(self.device)
        outputs = self.segmentation_model(**inputs)
        mask = torch.sigmoid(outputs.logits).cpu().detach().unsqueeze(-1).numpy()
        mask_pil = self.numpy_to_pil(mask)[0].resize(image.size)

        # Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae,
            text_encoder=self.text_encoder,
            tokenizer=self.tokenizer,
            unet=self.unet,
            scheduler=self.scheduler,
            safety_checker=self.safety_checker,
            feature_extractor=self.feature_extractor,
        )
        return inpainting_pipeline(
            prompt=prompt,
            image=image,
            mask_image=mask_pil,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
        )
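# Hypothetical usage sketch (checkpoint names and the init_image variable are
# assumptions, not from this file):
# from transformers import CLIPSegProcessor, CLIPSegForImageSegmentation
# processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
# model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")
# pipe = DiffusionPipeline.from_pretrained(
#     "runwayml/stable-diffusion-inpainting",
#     custom_pipeline="text_inpainting",
#     segmentation_model=model,
#     segmentation_processor=processor,
# )
# result = pipe(prompt="a red couch", image=init_image, text="the couch")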
| 361
|
"""simple docstring"""
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    """Avoid interleaved prints from multiple processes by locking this file."""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)
__SCREAMING_SNAKE_CASE =int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
__SCREAMING_SNAKE_CASE =torch.device("cuda", local_rank)
__SCREAMING_SNAKE_CASE =socket.gethostname()
__SCREAMING_SNAKE_CASE =F"[{hostname}-{local_rank}]"
try:
# test distributed
dist.init_process_group("nccl")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
printflock(F"{gpu} is OK (global rank: {rank}/{world_size})")
dist.barrier()
if rank == 0:
printflock(F"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}")
except Exception:
printflock(F"{gpu} is broken")
raise
| 321
| 0
|
class CircularQueue:
    """Fixed-capacity FIFO queue backed by a circular buffer."""

    def __init__(self, n: int) -> None:
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")

        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception("UNDERFLOW")

        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
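# Minimal usage sketch:
# cq = CircularQueue(3)
# cq.enqueue("a").enqueue("b")   # enqueue returns self, so calls chain
# len(cq)        # 2
# cq.first()     # "a"
# cq.dequeue()   # "a"; the freed slot is reused once rear wraps around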
| 362
|
"""simple docstring"""
class Node:
    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return f"{self.__class__.__name__}({self.name}, {self.val})"

    def __lt__(self, other):
        return self.val < other.val


class MinHeap:
    """Min-heap of Node objects, with O(1) value lookup by node and by name."""

    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)

        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val

        # heapify from the last internal node down to the root
        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    def sift_down(self, idx, array):
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)

            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r

            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        return self.heap[0]

    def remove(self):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )

        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "new_value must be less than the current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])
__SCREAMING_SNAKE_CASE =Node("R", -1)
__SCREAMING_SNAKE_CASE =Node("B", 6)
__SCREAMING_SNAKE_CASE =Node("A", 3)
__SCREAMING_SNAKE_CASE =Node("X", 1)
__SCREAMING_SNAKE_CASE =Node("E", 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
__SCREAMING_SNAKE_CASE =MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print("Min Heap - before decrease key")
for i in my_min_heap.heap:
print(i)
print("Min Heap - After decrease key of node [B -> -17]")
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 321
| 0
|
"""simple docstring"""
# Sum-of-digits sequence (Project Euler 551): each term is the previous term
# plus the digit sum of the previous term. Digit arrays below are little-endian:
# digits[0] is the least significant digit.
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo = {}  # memo[digitsum(b)][c] -> cached jumps (diff, dn, k), sorted by dn


def next_term(a_i, k, i, n):
    """
    Calculates and updates a_i in place to either the n-th term or the
    smallest term for which c > 10^k, when the terms are written as
    a(i) = b * 10^k + c. Returns (difference between ending and starting
    term, number of terms calculated).
    """
    # ds_b - digitsum(b); c - the low k digits as an integer
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)

        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))
    return (diff, dn)


def compute(a_i, k, i, n):
    """Compute terms sequentially until reaching term n or overflowing k digits."""
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]

        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i


def add(digits, k, addend):
    """Add `addend` into the digit array, starting at digit index k."""
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10

        if addend == 0:
            break

    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)


def solution(n: int = 10**15) -> int:
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n
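# The sequence begins 1, 2, 4, 8, 16, 23, 28, 38, 49, 62, ...; solution() skips
# through it with the cached-difference memo to reach the 10**15-th term directly.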
if __name__ == "__main__":
print(F"{solution() = }")
| 363
|
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class CLIPSegProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Create a list with a single random PIL image."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPSegProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPSegProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPSegProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPSegProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_visual_prompt(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        visual_prompt_input = self.prepare_image_inputs()

        inputs = processor(images=image_input, visual_prompt=visual_prompt_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "conditional_pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
| 321
| 0
|
"""simple docstring"""
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/mask2former-swin-small-coco-instance": (
        "https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"
    )
    # See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}

logger = logging.get_logger(__name__)
class Mask2FormerConfig(PretrainedConfig):
    model_type = "mask2former"
    backbones_supported = ["swin"]
    attribute_map = {"hidden_size": "hidden_dim"}

    def __init__(
        self,
        backbone_config: Optional[Dict] = None,
        feature_size: int = 256,
        mask_feature_size: int = 256,
        hidden_dim: int = 256,
        encoder_feedforward_dim: int = 1024,
        activation_function: str = "relu",
        encoder_layers: int = 6,
        decoder_layers: int = 10,
        num_attention_heads: int = 8,
        dropout: float = 0.0,
        dim_feedforward: int = 2048,
        pre_norm: bool = False,
        enforce_input_projection: bool = False,
        common_stride: int = 4,
        ignore_value: int = 255,
        num_queries: int = 100,
        no_object_weight: float = 0.1,
        class_weight: float = 2.0,
        mask_weight: float = 5.0,
        dice_weight: float = 5.0,
        train_num_points: int = 12544,
        oversample_ratio: float = 3.0,
        importance_sample_ratio: float = 0.75,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        use_auxiliary_loss: bool = True,
        feature_strides: List[int] = [4, 8, 16, 32],
        output_auxiliary_logits: bool = None,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.")
            backbone_config = CONFIG_MAPPING["swin"](
                image_size=224, in_channels=3, patch_size=4, embed_dim=96,
                depths=[2, 2, 18, 2], num_heads=[3, 6, 12, 24], window_size=7,
                drop_path_rate=0.3, use_absolute_embeddings=False,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )

        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_hidden_layers = decoder_layers

        super().__init__(**kwargs)

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        return cls(
            backbone_config=backbone_config,
            **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
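# Minimal usage sketch:
# config = Mask2FormerConfig()        # defaults to a Swin backbone
# config_dict = config.to_dict()      # backbone_config is serialized as a nested dict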
| 364
|
"""simple docstring"""
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 321
| 0
|
"""simple docstring"""
def longest_distance(graph):
    """Longest path (counted in vertices) in a DAG via Kahn's topological sort."""
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))
# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
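# For the adjacency list above this prints 5, e.g. via the path 0 -> 2 -> 5 -> 6 -> 7.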
| 365
|
"""simple docstring"""
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class BertGenerationEncoderTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=50,
        initializer_range=0.02,
        use_labels=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.use_labels = use_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return BertGenerationConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            token_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(self, config, input_ids, input_mask, token_labels, **kwargs):
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        token_labels,
        encoder_hidden_states,
        encoder_attention_mask,
        **kwargs,
    ):
        config.add_cross_attention = True
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        input_mask,
        token_labels,
        encoder_hidden_states,
        encoder_attention_mask,
        **kwargs,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = BertGenerationDecoder(config=config).to(torch_device).eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        input_mask,
        token_labels,
        *args,
    ):
        model = BertGenerationDecoder(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class BertGenerationEncoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    all_generative_model_classes = (BertGenerationDecoder,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
        if is_torch_available()
        else {}
    )

    def setUp(self):
        self.model_tester = BertGenerationEncoderTester(self)
        self.config_tester = ConfigTester(self, config_class=BertGenerationConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_bert(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = "bert"
        self.model_tester.create_and_check_model(config, input_ids, input_mask, token_labels)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        self.assertIsNotNone(model)


@require_torch
class BertGenerationEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 1024])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))


@require_torch
class BertGenerationDecoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 50358])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 321
| 0
|
"""simple docstring"""
print((lambda quine: quine % quine)("print((lambda quine: quine %% quine)(%r))"))
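# How the quine works: the template is applied to itself; %r re-quotes the string
# (and %% collapses to a single %), so the printed output is exactly this source line.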
| 366
|
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class UpperCamelCase :
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> int:
'''simple docstring'''
return None
class UpperCamelCase :
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> str:
'''simple docstring'''
return None
class UpperCamelCase ( unittest.TestCase ):
lowercase = [
# (model_name, model_kwargs)
('bert-base-cased', {}),
('gpt2', {'use_cache': False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(__UpperCamelCase ,'tf' ,12 ,**__UpperCamelCase )
@require_torch
@slow
def _UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(__UpperCamelCase ,'pt' ,12 ,**__UpperCamelCase )
@require_torch
@slow
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
from transformers import BertModel
lowercase_ : Union[str, Any] = ['[UNK]', '[SEP]', '[CLS]', '[PAD]', '[MASK]', 'some', 'other', 'words']
with NamedTemporaryFile(mode='w+t' ) as vocab_file:
vocab_file.write('\n'.join(__UpperCamelCase ) )
vocab_file.flush()
lowercase_ : List[str] = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
lowercase_ : Optional[Any] = BertModel(BertConfig(vocab_size=len(__UpperCamelCase ) ) )
model.save_pretrained(__UpperCamelCase )
self._test_export(__UpperCamelCase ,'pt' ,12 ,__UpperCamelCase )
@require_tf
@slow
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowercase_ : Optional[int] = self._test_export(__UpperCamelCase ,'tf' ,12 ,**__UpperCamelCase )
lowercase_ : int = quantize(Path(__UpperCamelCase ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(__UpperCamelCase ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
@require_torch
@slow
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowercase_ : Tuple = self._test_export(__UpperCamelCase ,'pt' ,12 ,**__UpperCamelCase )
lowercase_ : Tuple = quantize(__UpperCamelCase )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(__UpperCamelCase ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=None ,**__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
try:
# Compute path
with TemporaryDirectory() as tempdir:
lowercase_ : Dict = Path(__UpperCamelCase ).joinpath('model.onnx' )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase )
return path
except Exception as e:
self.fail(__UpperCamelCase )
@require_torch
@require_tokenizers
@slow
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
from transformers import BertModel
lowercase_ : List[Any] = BertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
lowercase_ : Union[str, Any] = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
self._test_infer_dynamic_axis(__UpperCamelCase ,__UpperCamelCase ,'pt' )
@require_tf
@require_tokenizers
@slow
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
from transformers import TFBertModel
lowercase_ : Optional[Any] = TFBertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
lowercase_ : Any = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
self._test_infer_dynamic_axis(__UpperCamelCase ,__UpperCamelCase ,'tf' )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Dict:
'''simple docstring'''
lowercase_ : Tuple = FeatureExtractionPipeline(__UpperCamelCase ,__UpperCamelCase )
lowercase_ : Dict = ['input_ids', 'token_type_ids', 'attention_mask', 'output_0', 'output_1']
lowercase_ , lowercase_ , lowercase_ , lowercase_ : Any = infer_shapes(__UpperCamelCase ,__UpperCamelCase )
# Assert all variables are present
self.assertEqual(len(__UpperCamelCase ) ,len(__UpperCamelCase ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3] ,__UpperCamelCase )
self.assertSequenceEqual(variable_names[3:] ,__UpperCamelCase )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] ,{0: 'batch', 1: 'sequence'} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes['output_0'] ,{0: 'batch', 1: 'sequence'} )
self.assertDictEqual(shapes['output_1'] ,{0: 'batch'} )
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
lowercase_ : Any = ['input_ids', 'attention_mask', 'token_type_ids']
lowercase_ : List[Any] = {'input_ids': [1, 2, 3, 4], 'attention_mask': [0, 0, 0, 0], 'token_type_ids': [1, 1, 1, 1]}
lowercase_ , lowercase_ : int = ensure_valid_input(FuncContiguousArgs() ,__UpperCamelCase ,__UpperCamelCase )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(__UpperCamelCase ) ,3 )
# Should have exactly the same input names
self.assertEqual(set(__UpperCamelCase ) ,set(__UpperCamelCase ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(__UpperCamelCase ,(tokens['input_ids'], tokens['token_type_ids'], tokens['attention_mask']) )
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
lowercase_ , lowercase_ : Optional[int] = ensure_valid_input(FuncNonContiguousArgs() ,__UpperCamelCase ,__UpperCamelCase )
# Should have exactly the one arg (all before the one not provided "some_other_args")
self.assertEqual(len(__UpperCamelCase ) ,1 )
self.assertEqual(len(__UpperCamelCase ) ,1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] ,tokens['input_ids'] )
self.assertEqual(ordered_input_names[0] ,'input_ids' )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : Dict = generate_identified_filename(Path('/home/something/my_fake_model.onnx' ) ,'-test' )
self.assertEqual('/home/something/my_fake_model-test.onnx' ,generated.as_posix() )
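# --- Editor's sketch (not from the original file): calling the `convert`
# helper tested above directly. The positional order mirrors the call inside
# `_test_export` (framework, model id, output path, opset); treat the exact
# signature as an assumption.
def export_to_onnx_sketch(output_dir: str) -> Path:
    onnx_path = Path(output_dir).joinpath('model.onnx')  # convert() expects a fresh output folder
    convert('pt', 'bert-base-cased', onnx_path, 12)
    return onnx_path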
| 321
| 0
|
"""simple docstring"""
from __future__ import annotations
def lowercase__( __SCREAMING_SNAKE_CASE : list[int | str] ) -> Optional[Any]:
create_state_space_tree(a_ , [] , 0 , [0 for i in range(len(a_ ) )] )
def lowercase__( __SCREAMING_SNAKE_CASE : list[int | str] , __SCREAMING_SNAKE_CASE : list[int | str] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] , ) -> Dict:
if index == len(a_ ):
print(a_ )
return
for i in range(len(a_ ) ):
if not index_used[i]:
current_sequence.append(sequence[i] )
lowercase_ : str = True
create_state_space_tree(a_ , a_ , index + 1 , a_ )
current_sequence.pop()
lowercase_ : int = False
__SCREAMING_SNAKE_CASE =[3, 1, 2, 4]
generate_all_permutations(sequence)
__SCREAMING_SNAKE_CASE =["A", "B", "C"]
generate_all_permutations(sequence_a)
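# Editor's cross-check: for distinct elements the backtracking above emits the
# same n! orderings as the standard library.
import itertools

assert sum(1 for _ in itertools.permutations([3, 1, 2, 4])) == 24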
| 367
|
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class UpperCamelCase ( unittest.TestCase ):
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
lowercase_ : Union[str, Any] = [[1, 2, 4], [1, 2, 3, 4]]
lowercase_ : List[Any] = DisjunctiveConstraint(__UpperCamelCase )
self.assertTrue(isinstance(dc.token_ids ,__UpperCamelCase ) )
with self.assertRaises(__UpperCamelCase ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(__UpperCamelCase ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
lowercase_ : List[Any] = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(__UpperCamelCase ):
DisjunctiveConstraint(__UpperCamelCase ) # fails here
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
lowercase_ : Optional[int] = [[1, 2, 3], [1, 2, 4]]
lowercase_ : Dict = DisjunctiveConstraint(__UpperCamelCase )
lowercase_ , lowercase_ , lowercase_ : Union[str, Any] = dc.update(1 )
lowercase_ : str = stepped is True and completed is False and reset is False
self.assertTrue(__UpperCamelCase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
lowercase_ , lowercase_ , lowercase_ : Optional[Any] = dc.update(2 )
lowercase_ : Any = stepped is True and completed is False and reset is False
self.assertTrue(__UpperCamelCase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
lowercase_ , lowercase_ , lowercase_ : Tuple = dc.update(3 )
lowercase_ : Union[str, Any] = stepped is True and completed is True and reset is False
self.assertTrue(__UpperCamelCase )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
lowercase_ : List[str] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
lowercase_ : Union[str, Any] = DisjunctiveConstraint(__UpperCamelCase )
lowercase_ , lowercase_ , lowercase_ : Optional[int] = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
lowercase_ , lowercase_ , lowercase_ : int = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
lowercase_ , lowercase_ , lowercase_ : str = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
lowercase_ , lowercase_ , lowercase_ : List[str] = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
lowercase_ , lowercase_ , lowercase_ : Optional[int] = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
lowercase_ , lowercase_ , lowercase_ : int = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
lowercase_ , lowercase_ , lowercase_ : Dict = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
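# --- Editor's sketch (not part of the test file): how a DisjunctiveConstraint
# is normally consumed. Passing `constraints=` to `generate` together with
# beam search is the documented entry point; the model and tokenizer here are
# placeholders supplied by the caller.
def constrained_generation_sketch(model, tokenizer, prompt):
    constraint = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]])  # token-id alternatives, as in the tests
    inputs = tokenizer(prompt, return_tensors='pt')
    return model.generate(**inputs, constraints=[constraint], num_beams=4)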
| 321
| 0
|
"""simple docstring"""
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
__SCREAMING_SNAKE_CASE =re.compile(r"\b(a|an|the)\b", re.UNICODE)
__SCREAMING_SNAKE_CASE =None
def lowercase__( ):
lowercase_ : str = argparse.ArgumentParser('Official evaluation script for SQuAD version 2.0.' )
parser.add_argument('data_file' , metavar='data.json' , help='Input data JSON file.' )
parser.add_argument('pred_file' , metavar='pred.json' , help='Model predictions.' )
parser.add_argument(
'--out-file' , '-o' , metavar='eval.json' , help='Write accuracy metrics to file (default is stdout).' )
parser.add_argument(
'--na-prob-file' , '-n' , metavar='na_prob.json' , help='Model estimates of probability of no answer.' )
parser.add_argument(
'--na-prob-thresh' , '-t' , type=_UpperCamelCase , default=1.0 , help='Predict "" if no-answer probability exceeds this (default = 1.0).' , )
parser.add_argument(
'--out-image-dir' , '-p' , metavar='out_images' , default=_UpperCamelCase , help='Save precision-recall curves to directory.' )
parser.add_argument('--verbose' , '-v' , action='store_true' )
if len(sys.argv ) == 1:
parser.print_help()
sys.exit(1 )
return parser.parse_args()
def lowercase__( __SCREAMING_SNAKE_CASE : Optional[int] ):
lowercase_ : Optional[Any] = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
lowercase_ : Optional[Any] = bool(qa['answers']['text'] )
return qid_to_has_ans
def lowercase__( __SCREAMING_SNAKE_CASE : Dict ):
def remove_articles(__SCREAMING_SNAKE_CASE : Any ):
return ARTICLES_REGEX.sub(' ' , _UpperCamelCase )
def white_space_fix(__SCREAMING_SNAKE_CASE : List[Any] ):
return " ".join(text.split() )
def remove_punc(__SCREAMING_SNAKE_CASE : Dict ):
lowercase_ : Tuple = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(__SCREAMING_SNAKE_CASE : Optional[int] ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(_UpperCamelCase ) ) ) )
def lowercase__( __SCREAMING_SNAKE_CASE : Tuple ):
if not s:
return []
return normalize_answer(_UpperCamelCase ).split()
def lowercase__( __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[int] ):
return int(normalize_answer(_UpperCamelCase ) == normalize_answer(_UpperCamelCase ) )
def lowercase__( __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[str] ):
lowercase_ : int = get_tokens(_UpperCamelCase )
lowercase_ : str = get_tokens(_UpperCamelCase )
lowercase_ : Optional[Any] = collections.Counter(_UpperCamelCase ) & collections.Counter(_UpperCamelCase )
lowercase_ : List[str] = sum(common.values() )
if len(_UpperCamelCase ) == 0 or len(_UpperCamelCase ) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks )
if num_same == 0:
return 0
lowercase_ : Optional[Any] = 1.0 * num_same / len(_UpperCamelCase )
lowercase_ : int = 1.0 * num_same / len(_UpperCamelCase )
lowercase_ : Optional[Any] = (2 * precision * recall) / (precision + recall)
return fa
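# Worked example (editor's note): gold "the cat sat" normalizes to
# ['cat', 'sat'] (article dropped) and prediction "cat sat on mat" to
# ['cat', 'sat', 'on', 'mat']; 2 shared tokens give precision 2/4 and recall
# 2/2, so F1 = 2 * 0.5 * 1.0 / 1.5 ~= 0.667.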
def lowercase__( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int ):
lowercase_ : int = {}
lowercase_ : Union[str, Any] = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
lowercase_ : Any = qa['id']
lowercase_ : Tuple = [t for t in qa['answers']['text'] if normalize_answer(_UpperCamelCase )]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
lowercase_ : str = ['']
if qid not in preds:
print(F'''Missing prediction for {qid}''' )
continue
lowercase_ : Dict = preds[qid]
# Take max over all gold answers
lowercase_ : Tuple = max(compute_exact(_UpperCamelCase , _UpperCamelCase ) for a in gold_answers )
lowercase_ : Optional[Any] = max(compute_fa(_UpperCamelCase , _UpperCamelCase ) for a in gold_answers )
return exact_scores, fa_scores
def lowercase__( __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : int ):
lowercase_ : List[str] = {}
for qid, s in scores.items():
lowercase_ : str = na_probs[qid] > na_prob_thresh
if pred_na:
lowercase_ : Any = float(not qid_to_has_ans[qid] )
else:
lowercase_ : int = s
return new_scores
def lowercase__( __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Any=None ):
if not qid_list:
lowercase_ : Tuple = len(_UpperCamelCase )
return collections.OrderedDict(
[
('exact', 1_00.0 * sum(exact_scores.values() ) / total),
('f1', 1_00.0 * sum(fa_scores.values() ) / total),
('total', total),
] )
else:
lowercase_ : str = len(_UpperCamelCase )
return collections.OrderedDict(
[
('exact', 1_00.0 * sum(exact_scores[k] for k in qid_list ) / total),
('f1', 1_00.0 * sum(fa_scores[k] for k in qid_list ) / total),
('total', total),
] )
def lowercase__( __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : int ):
for k in new_eval:
lowercase_ : Tuple = new_eval[k]
def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Tuple ):
plt.step(_UpperCamelCase , _UpperCamelCase , color='b' , alpha=0.2 , where='post' )
plt.fill_between(_UpperCamelCase , _UpperCamelCase , step='post' , alpha=0.2 , color='b' )
plt.xlabel('Recall' )
plt.ylabel('Precision' )
plt.xlim([0.0, 1.05] )
plt.ylim([0.0, 1.05] )
plt.title(_UpperCamelCase )
plt.savefig(_UpperCamelCase )
plt.clf()
def lowercase__( __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : int=None ):
lowercase_ : str = sorted(_UpperCamelCase , key=lambda __SCREAMING_SNAKE_CASE : na_probs[k] )
lowercase_ : Dict = 0.0
lowercase_ : Optional[int] = 1.0
lowercase_ : Tuple = 0.0
lowercase_ : Optional[int] = [1.0]
lowercase_ : Optional[int] = [0.0]
lowercase_ : List[Any] = 0.0
for i, qid in enumerate(_UpperCamelCase ):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
lowercase_ : Any = true_pos / float(i + 1 )
lowercase_ : Tuple = true_pos / float(_UpperCamelCase )
if i == len(_UpperCamelCase ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(_UpperCamelCase )
recalls.append(_UpperCamelCase )
if out_image:
plot_pr_curve(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
return {"ap": 1_00.0 * avg_prec}
def lowercase__( __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[str] ):
if out_image_dir and not os.path.exists(_UpperCamelCase ):
os.makedirs(_UpperCamelCase )
lowercase_ : int = sum(1 for v in qid_to_has_ans.values() if v )
if num_true_pos == 0:
return
lowercase_ : Tuple = make_precision_recall_eval(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , out_image=os.path.join(_UpperCamelCase , 'pr_exact.png' ) , title='Precision-Recall curve for Exact Match score' , )
lowercase_ : str = make_precision_recall_eval(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , out_image=os.path.join(_UpperCamelCase , 'pr_f1.png' ) , title='Precision-Recall curve for F1 score' , )
lowercase_ : List[str] = {k: float(_UpperCamelCase ) for k, v in qid_to_has_ans.items()}
lowercase_ : str = make_precision_recall_eval(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , out_image=os.path.join(_UpperCamelCase , 'pr_oracle.png' ) , title='Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)' , )
merge_eval(_UpperCamelCase , _UpperCamelCase , 'pr_exact' )
merge_eval(_UpperCamelCase , _UpperCamelCase , 'pr_f1' )
merge_eval(_UpperCamelCase , _UpperCamelCase , 'pr_oracle' )
def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Tuple ):
if not qid_list:
return
lowercase_ : int = [na_probs[k] for k in qid_list]
lowercase_ : Tuple = np.ones_like(_UpperCamelCase ) / float(len(_UpperCamelCase ) )
plt.hist(_UpperCamelCase , weights=_UpperCamelCase , bins=20 , range=(0.0, 1.0) )
plt.xlabel('Model probability of no-answer' )
plt.ylabel('Proportion of dataset' )
plt.title(F'''Histogram of no-answer probability: {name}''' )
plt.savefig(os.path.join(_UpperCamelCase , F'''na_prob_hist_{name}.png''' ) )
plt.clf()
def lowercase__( __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : List[Any] ):
lowercase_ : str = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
lowercase_ : str = num_no_ans
lowercase_ : List[Any] = cur_score
lowercase_ : Optional[int] = 0.0
lowercase_ : str = sorted(_UpperCamelCase , key=lambda __SCREAMING_SNAKE_CASE : na_probs[k] )
for i, qid in enumerate(_UpperCamelCase ):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
lowercase_ : Tuple = scores[qid]
else:
if preds[qid]:
lowercase_ : Union[str, Any] = -1
else:
lowercase_ : Optional[int] = 0
cur_score += diff
if cur_score > best_score:
lowercase_ : Union[str, Any] = cur_score
lowercase_ : Dict = na_probs[qid]
return 1_00.0 * best_score / len(_UpperCamelCase ), best_thresh
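# Editor's note: the sweep above orders questions by increasing no-answer
# probability and evaluates, at each prefix boundary, the score obtained if
# questions up to that point keep the model's answer while all later (higher
# no-answer probability) questions are forced to empty; best_thresh is the
# probability at which that hypothetical score peaks.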
def lowercase__( __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Tuple ):
lowercase_ , lowercase_ : List[str] = find_best_thresh(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
lowercase_ , lowercase_ : Dict = find_best_thresh(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
lowercase_ : Union[str, Any] = best_exact
lowercase_ : Optional[Any] = exact_thresh
lowercase_ : Optional[Any] = best_fa
lowercase_ : List[str] = fa_thresh
def lowercase__( ):
with open(OPTS.data_file ) as f:
lowercase_ : List[Any] = json.load(_UpperCamelCase )
lowercase_ : Dict = dataset_json['data']
with open(OPTS.pred_file ) as f:
lowercase_ : Tuple = json.load(_UpperCamelCase )
if OPTS.na_prob_file:
with open(OPTS.na_prob_file ) as f:
lowercase_ : str = json.load(_UpperCamelCase )
else:
lowercase_ : str = {k: 0.0 for k in preds}
lowercase_ : str = make_qid_to_has_ans(_UpperCamelCase ) # maps qid to True/False
lowercase_ : Tuple = [k for k, v in qid_to_has_ans.items() if v]
lowercase_ : Tuple = [k for k, v in qid_to_has_ans.items() if not v]
lowercase_ , lowercase_ : Optional[Any] = get_raw_scores(_UpperCamelCase , _UpperCamelCase )
lowercase_ : Dict = apply_no_ans_threshold(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , OPTS.na_prob_thresh )
lowercase_ : Tuple = apply_no_ans_threshold(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , OPTS.na_prob_thresh )
lowercase_ : Union[str, Any] = make_eval_dict(_UpperCamelCase , _UpperCamelCase )
if has_ans_qids:
lowercase_ : Optional[int] = make_eval_dict(_UpperCamelCase , _UpperCamelCase , qid_list=_UpperCamelCase )
merge_eval(_UpperCamelCase , _UpperCamelCase , 'HasAns' )
if no_ans_qids:
lowercase_ : Any = make_eval_dict(_UpperCamelCase , _UpperCamelCase , qid_list=_UpperCamelCase )
merge_eval(_UpperCamelCase , _UpperCamelCase , 'NoAns' )
if OPTS.na_prob_file:
find_all_best_thresh(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , OPTS.out_image_dir )
histogram_na_prob(_UpperCamelCase , _UpperCamelCase , OPTS.out_image_dir , 'hasAns' )
histogram_na_prob(_UpperCamelCase , _UpperCamelCase , OPTS.out_image_dir , 'noAns' )
if OPTS.out_file:
with open(OPTS.out_file , 'w' ) as f:
json.dump(_UpperCamelCase , _UpperCamelCase )
else:
print(json.dumps(_UpperCamelCase , indent=2 ) )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE =parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
main()
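# Example invocation (editor's note; file names are placeholders):
#   python <this_script>.py dev-v2.0.json predictions.json \
#       --na-prob-file na_probs.json --na-prob-thresh 0.5 --out-file eval.json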
| 368
|
"""simple docstring"""
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def lowercase__( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str ):
def get_masked_lm_array(__SCREAMING_SNAKE_CASE : str ):
lowercase_ : int = F'''masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
lowercase_ : str = tf.train.load_variable(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if "kernel" in name:
lowercase_ : List[Any] = array.transpose()
return torch.from_numpy(__SCREAMING_SNAKE_CASE )
def get_encoder_array(__SCREAMING_SNAKE_CASE : str ):
lowercase_ : Tuple = F'''encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
lowercase_ : Optional[Any] = tf.train.load_variable(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if "kernel" in name:
lowercase_ : Tuple = array.transpose()
return torch.from_numpy(__SCREAMING_SNAKE_CASE )
def get_encoder_layer_array(__SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str ):
lowercase_ : List[Any] = F'''encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
lowercase_ : List[Any] = tf.train.load_variable(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if "kernel" in name:
lowercase_ : List[str] = array.transpose()
return torch.from_numpy(__SCREAMING_SNAKE_CASE )
def get_encoder_attention_layer_array(__SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[Any] ):
lowercase_ : List[Any] = F'''encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
lowercase_ : Optional[Any] = tf.train.load_variable(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = array.reshape(__SCREAMING_SNAKE_CASE )
if "kernel" in name:
lowercase_ : List[str] = array.transpose()
return torch.from_numpy(__SCREAMING_SNAKE_CASE )
print(F'''Loading model based on config from {config_path}...''' )
lowercase_ : Any = BertConfig.from_json_file(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = BertForMaskedLM(__SCREAMING_SNAKE_CASE )
# Layers
for layer_index in range(0 , config.num_hidden_layers ):
lowercase_ : BertLayer = model.bert.encoder.layer[layer_index]
# Self-attention
lowercase_ : BertSelfAttention = layer.attention.self
lowercase_ : str = get_encoder_attention_layer_array(
__SCREAMING_SNAKE_CASE , '_query_dense/kernel' , self_attn.query.weight.data.shape )
lowercase_ : Tuple = get_encoder_attention_layer_array(
__SCREAMING_SNAKE_CASE , '_query_dense/bias' , self_attn.query.bias.data.shape )
lowercase_ : Tuple = get_encoder_attention_layer_array(
__SCREAMING_SNAKE_CASE , '_key_dense/kernel' , self_attn.key.weight.data.shape )
lowercase_ : int = get_encoder_attention_layer_array(
__SCREAMING_SNAKE_CASE , '_key_dense/bias' , self_attn.key.bias.data.shape )
lowercase_ : Dict = get_encoder_attention_layer_array(
__SCREAMING_SNAKE_CASE , '_value_dense/kernel' , self_attn.value.weight.data.shape )
lowercase_ : List[Any] = get_encoder_attention_layer_array(
__SCREAMING_SNAKE_CASE , '_value_dense/bias' , self_attn.value.bias.data.shape )
# Self-attention Output
lowercase_ : BertSelfOutput = layer.attention.output
lowercase_ : Dict = get_encoder_attention_layer_array(
__SCREAMING_SNAKE_CASE , '_output_dense/kernel' , self_output.dense.weight.data.shape )
lowercase_ : Any = get_encoder_attention_layer_array(
__SCREAMING_SNAKE_CASE , '_output_dense/bias' , self_output.dense.bias.data.shape )
lowercase_ : Tuple = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_attention_layer_norm/gamma' )
lowercase_ : Any = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_attention_layer_norm/beta' )
# Intermediate
lowercase_ : BertIntermediate = layer.intermediate
lowercase_ : Optional[Any] = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_intermediate_dense/kernel' )
lowercase_ : Optional[int] = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_intermediate_dense/bias' )
# Output
lowercase_ : BertOutput = layer.output
lowercase_ : Any = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_output_dense/kernel' )
lowercase_ : Optional[Any] = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_output_dense/bias' )
lowercase_ : List[str] = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_output_layer_norm/gamma' )
lowercase_ : int = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_output_layer_norm/beta' )
# Embeddings
lowercase_ : Optional[Any] = get_encoder_array('_position_embedding_layer/embeddings' )
lowercase_ : int = get_encoder_array('_type_embedding_layer/embeddings' )
lowercase_ : Any = get_encoder_array('_embedding_norm_layer/gamma' )
lowercase_ : Optional[Any] = get_encoder_array('_embedding_norm_layer/beta' )
# LM Head
lowercase_ : int = model.cls.predictions.transform
lowercase_ : str = get_masked_lm_array('dense/kernel' )
lowercase_ : Optional[Any] = get_masked_lm_array('dense/bias' )
lowercase_ : Optional[Any] = get_masked_lm_array('layer_norm/gamma' )
lowercase_ : Optional[int] = get_masked_lm_array('layer_norm/beta' )
lowercase_ : List[str] = get_masked_lm_array('embedding_table' )
# Pooling
lowercase_ : Optional[Any] = BertPooler(config=__SCREAMING_SNAKE_CASE )
lowercase_ : BertPooler = get_encoder_array('_pooler_layer/kernel' )
lowercase_ : BertPooler = get_encoder_array('_pooler_layer/bias' )
# Export final model
model.save_pretrained(__SCREAMING_SNAKE_CASE )
# Integration test - should load without any errors ;)
lowercase_ : Tuple = BertForMaskedLM.from_pretrained(__SCREAMING_SNAKE_CASE )
print(new_model.eval() )
print('Model conversion was done successfully!' )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
parser.add_argument(
"--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow Token Dropping checkpoint path."
)
parser.add_argument(
"--bert_config_file",
type=str,
required=True,
help="The config json file corresponding to the BERT model. This specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path",
type=str,
required=True,
help="Path to the output PyTorch model.",
)
__SCREAMING_SNAKE_CASE =parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
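# Example invocation (editor's note; paths are placeholders):
#   python <this_script>.py \
#       --tf_checkpoint_path ./tf_ckpt/ \
#       --bert_config_file ./bert_config.json \
#       --pytorch_dump_path ./converted_model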
| 321
| 0
|
"""simple docstring"""
from __future__ import annotations
def lowercase__( __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Tuple = None ):
lowercase_ : int = word_bank or []
# create a table
lowercase_ : int = len(a__ ) + 1
lowercase_ : list[list[list[str]]] = []
for _ in range(a__ ):
table.append([] )
# seed value
lowercase_ : Tuple = [[]] # because empty string has empty combination
# iterate through the indices
for i in range(a__ ):
# condition
if table[i] != []:
for word in word_bank:
# slice condition
if target[i : i + len(a__ )] == word:
lowercase_ : list[list[str]] = [
[word, *way] for way in table[i]
]
# adds the word to every combination the current position holds
# now,push that combination to the table[i+len(word)]
table[i + len(a__ )] += new_combinations
# combinations are in reverse order so reverse for better output
for combination in table[len(a__ )]:
combination.reverse()
return table[len(a__ )]
if __name__ == "__main__":
print(all_construct("jwajalapa", ["jwa", "j", "w", "a", "la", "lapa"]))
print(all_construct("rajamati", ["s", "raj", "amat", "raja", "ma", "i", "t"]))
print(
all_construct(
"hexagonosaurus",
["h", "ex", "hex", "ag", "ago", "ru", "auru", "rus", "go", "no", "o", "s"],
)
)
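# Editor's sketch: the same recurrence written top-down, for comparison with
# the table-based version above (behaviour is equivalent; without memoisation
# the work is exponential, like the output size in the worst case).
def all_construct_topdown(target: str, word_bank: list[str]) -> list[list[str]]:
    if target == "":
        return [[]]  # the empty string has exactly one construction: no words
    ways = []
    for word in word_bank:
        if target.startswith(word):
            for rest in all_construct_topdown(target[len(word):], word_bank):
                ways.append([word, *rest])
    return ways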
| 369
|
"""simple docstring"""
from collections import namedtuple
import requests
from lxml import html # type: ignore
__SCREAMING_SNAKE_CASE =namedtuple("covid_data", "cases deaths recovered")
def lowercase__( __SCREAMING_SNAKE_CASE : str = "https://www.worldometers.info/coronavirus/" ):
lowercase_ : Union[str, Any] = '//div[@class = "maincounter-number"]/span/text()'
return covid_data(*html.fromstring(requests.get(__SCREAMING_SNAKE_CASE ).content ).xpath(__SCREAMING_SNAKE_CASE ) )
__SCREAMING_SNAKE_CASE ="Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}"
print(fmt.format(*covid_stats()))
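# Editor's note: the XPath above is tied to worldometers' current markup; if
# the page layout changes, unpacking into covid_data will raise a TypeError
# because the query no longer returns exactly three counters.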
| 321
| 0
|
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
@add_end_docstrings(__UpperCamelCase )
class UpperCamelCase ( __UpperCamelCase ):
def __init__( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> str:
'''simple docstring'''
super().__init__(*_lowerCAmelCase ,**_lowerCAmelCase )
requires_backends(self ,'vision' )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == 'tf' else MODEL_FOR_VISION_2_SEQ_MAPPING )
def _UpperCAmelCase ( self ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase=None ) -> Dict:
'''simple docstring'''
lowercase_ : Optional[Any] = {}
lowercase_ : int = {}
if prompt is not None:
lowercase_ : Dict = prompt
if generate_kwargs is not None:
lowercase_ : List[Any] = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
lowercase_ : Optional[int] = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
'\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,'
' please use only one' )
lowercase_ : Any = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self ,__UpperCamelCase ,**__UpperCamelCase ) -> Optional[Any]:
'''simple docstring'''
return super().__call__(_lowerCAmelCase ,**_lowerCAmelCase )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase=None ) -> Optional[Any]:
'''simple docstring'''
lowercase_ : Optional[Any] = load_image(_lowerCAmelCase )
if prompt is not None:
if not isinstance(_lowerCAmelCase ,_lowerCAmelCase ):
raise ValueError(
f'''Received an invalid text input, got - {type(_lowerCAmelCase )} - but expected a single string. '''
'Note also that one single text can be provided for conditional image to text generation.' )
lowercase_ : Tuple = self.model.config.model_type
if model_type == "git":
lowercase_ : Tuple = self.image_processor(images=_lowerCAmelCase ,return_tensors=self.framework )
lowercase_ : Any = self.tokenizer(text=_lowerCAmelCase ,add_special_tokens=_lowerCAmelCase ).input_ids
lowercase_ : Tuple = [self.tokenizer.cls_token_id] + input_ids
lowercase_ : int = torch.tensor(_lowerCAmelCase ).unsqueeze(0 )
model_inputs.update({'input_ids': input_ids} )
elif model_type == "pix2struct":
lowercase_ : Dict = self.image_processor(images=_lowerCAmelCase ,header_text=_lowerCAmelCase ,return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
lowercase_ : int = self.image_processor(images=_lowerCAmelCase ,return_tensors=self.framework )
lowercase_ : Optional[Any] = self.tokenizer(_lowerCAmelCase ,return_tensors=self.framework )
model_inputs.update(_lowerCAmelCase )
else:
raise ValueError(f'''Model type {model_type} does not support conditional text generation''' )
else:
lowercase_ : Tuple = self.image_processor(images=_lowerCAmelCase ,return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
lowercase_ : int = None
return model_inputs
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase=None ) -> int:
'''simple docstring'''
if (
"input_ids" in model_inputs
and isinstance(model_inputs['input_ids'] ,_lowerCAmelCase )
and all(x is None for x in model_inputs['input_ids'] )
):
lowercase_ : List[Any] = None
if generate_kwargs is None:
lowercase_ : Dict = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
lowercase_ : Dict = model_inputs.pop(self.model.main_input_name )
lowercase_ : Optional[int] = self.model.generate(_lowerCAmelCase ,**_lowerCAmelCase ,**_lowerCAmelCase )
return model_outputs
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : Union[str, Any] = []
for output_ids in model_outputs:
lowercase_ : Union[str, Any] = {
"""generated_text""": self.tokenizer.decode(
_lowerCAmelCase ,skip_special_tokens=_lowerCAmelCase ,)
}
records.append(_lowerCAmelCase )
return records
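# --- Editor's sketch (not part of the module): typical use through the
# `pipeline` factory, where this class is registered under the task name
# "image-to-text". The model id is a placeholder example of a captioning
# checkpoint.
def image_to_text_sketch(image_path):
    from transformers import pipeline

    captioner = pipeline('image-to-text', model='nlpconnect/vit-gpt2-image-captioning')
    return captioner(image_path)  # -> [{'generated_text': '...'}]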
| 370
|
"""simple docstring"""
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadVaProcessor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 321
| 0
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class UpperCamelCase ( metaclass=_UpperCAmelCase ):
lowercase = ['torch']
def __init__( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> List[str]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
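# Editor's note: every class in this module follows the same pattern -- a
# placeholder registered under a real model's name so that imports succeed
# when torch is not installed, while constructing it or calling its
# classmethods (from_config / from_pretrained in the real API) raises a clear
# "requires torch" error via requires_backends.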
class UpperCamelCase ( metaclass=_UpperCAmelCase ):
lowercase = ['torch']
def __init__( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> Any:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> str:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=_UpperCAmelCase ):
lowercase = ['torch']
def __init__( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> Dict:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> List[str]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> List[str]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=_UpperCAmelCase ):
lowercase = ['torch']
def __init__( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> List[str]:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> int:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=_UpperCAmelCase ):
lowercase = ['torch']
def __init__( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> Dict:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> Dict:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> int:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=_UpperCAmelCase ):
lowercase = ['torch']
def __init__( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> Any:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> List[str]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> List[str]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=_UpperCAmelCase ):
lowercase = ['torch']
def __init__( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> int:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> Tuple:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> List[Any]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=_UpperCAmelCase ):
lowercase = ['torch']
def __init__( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> str:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=_UpperCAmelCase ):
lowercase = ['torch']
def __init__( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> List[str]:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> List[str]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> int:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=_UpperCAmelCase ):
lowercase = ['torch']
def __init__( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> List[str]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> int:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=_UpperCAmelCase ):
lowercase = ['torch']
def __init__( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> List[str]:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> List[Any]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
def lowercase__( *__SCREAMING_SNAKE_CASE : Tuple , **__SCREAMING_SNAKE_CASE : List[Any] ):
requires_backends(UpperCAmelCase__ , ['torch'] )
def lowercase__( *__SCREAMING_SNAKE_CASE : Dict , **__SCREAMING_SNAKE_CASE : Union[str, Any] ):
requires_backends(UpperCAmelCase__ , ['torch'] )
def lowercase__( *__SCREAMING_SNAKE_CASE : List[Any] , **__SCREAMING_SNAKE_CASE : Any ):
requires_backends(UpperCAmelCase__ , ['torch'] )
def lowercase__( *__SCREAMING_SNAKE_CASE : str , **__SCREAMING_SNAKE_CASE : Optional[int] ):
requires_backends(UpperCAmelCase__ , ['torch'] )
def lowercase__( *__SCREAMING_SNAKE_CASE : Optional[Any] , **__SCREAMING_SNAKE_CASE : Tuple ):
requires_backends(UpperCAmelCase__ , ['torch'] )
def lowercase__( *__SCREAMING_SNAKE_CASE : int , **__SCREAMING_SNAKE_CASE : Optional[int] ):
requires_backends(UpperCAmelCase__ , ['torch'] )
def lowercase__( *__SCREAMING_SNAKE_CASE : Optional[int] , **__SCREAMING_SNAKE_CASE : Optional[Any] ):
requires_backends(UpperCAmelCase__ , ['torch'] )
class UpperCamelCase ( metaclass=_UpperCAmelCase ):
lowercase = ['torch']
def __init__( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> str:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> Dict:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=_UpperCAmelCase ):
lowercase = ['torch']
def __init__( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> str:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> int:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=_UpperCAmelCase ):
lowercase = ['torch']
def __init__( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> Dict:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> str:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=_UpperCAmelCase ):
lowercase = ['torch']
def __init__( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> str:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> Dict:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> List[str]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=_UpperCAmelCase ):
lowercase = ['torch']
def __init__( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> Dict:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> int:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=_UpperCAmelCase ):
lowercase = ['torch']
def __init__( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> List[str]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=_UpperCAmelCase ):
lowercase = ['torch']
def __init__( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> Dict:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> str:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> str:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=_UpperCAmelCase ):
lowercase = ['torch']
def __init__( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> int:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=_UpperCAmelCase ):
lowercase = ['torch']
def __init__( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> Dict:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> int:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> Any:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=_UpperCAmelCase ):
lowercase = ['torch']
def __init__( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> List[Any]:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> str:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=_UpperCAmelCase ):
lowercase = ['torch']
def __init__( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> Any:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> Tuple:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=_UpperCAmelCase ):
lowercase = ['torch']
def __init__( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> int:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> List[str]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=_UpperCAmelCase ):
lowercase = ['torch']
def __init__( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> str:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> Any:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=_UpperCAmelCase ):
lowercase = ['torch']
def __init__( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> str:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=_UpperCAmelCase ):
lowercase = ['torch']
def __init__( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> List[str]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=_UpperCAmelCase ):
lowercase = ['torch']
def __init__( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> Tuple:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=_UpperCAmelCase ):
lowercase = ['torch']
def __init__( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> Dict:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> Dict:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=_UpperCAmelCase ):
lowercase = ['torch']
def __init__( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> Tuple:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> int:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=_UpperCAmelCase ):
lowercase = ['torch']
def __init__( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> List[str]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> List[Any]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=_UpperCAmelCase ):
lowercase = ['torch']
def __init__( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> List[str]:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> List[str]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> Any:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=_UpperCAmelCase ):
lowercase = ['torch']
def __init__( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> List[str]:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> Any:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=_UpperCAmelCase ):
lowercase = ['torch']
def __init__( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> str:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=_UpperCAmelCase ):
lowercase = ['torch']
def __init__( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> List[str]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=_UpperCAmelCase ):
lowercase = ['torch']
def __init__( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> List[Any]:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> Dict:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> str:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=_UpperCAmelCase ):
lowercase = ['torch']
def __init__( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> List[str]:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> str:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=_UpperCAmelCase ):
lowercase = ['torch']
def __init__( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> Any:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=_UpperCAmelCase ):
lowercase = ['torch']
def __init__( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> Any:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=_UpperCAmelCase ):
lowercase = ['torch']
def __init__( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> Tuple:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> List[str]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=_UpperCAmelCase ):
lowercase = ['torch']
def __init__( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> Tuple:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=_UpperCAmelCase ):
lowercase = ['torch']
def __init__( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> List[str]:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> Tuple:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> List[str]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=_UpperCAmelCase ):
lowercase = ['torch']
def __init__( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> Tuple:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=_UpperCAmelCase ):
lowercase = ['torch']
def __init__( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> List[str]:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> Dict:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> List[str]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=_UpperCAmelCase ):
lowercase = ['torch']
def __init__( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=_UpperCAmelCase ):
lowercase = ['torch']
def __init__( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> str:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> str:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=_UpperCAmelCase ):
lowercase = ['torch']
def __init__( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> int:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=_UpperCAmelCase ):
lowercase = ['torch']
def __init__( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> Dict:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> str:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> Tuple:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=_UpperCAmelCase ):
lowercase = ['torch']
def __init__( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> Any:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> List[Any]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=_UpperCAmelCase ):
lowercase = ['torch']
def __init__( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> Any:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> str:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=_UpperCAmelCase ):
lowercase = ['torch']
def __init__( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> List[Any]:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> List[str]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
| 371
|
"""simple docstring"""
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class UpperCamelCase :
def __init__( self ,__UpperCamelCase ,__UpperCamelCase=13 ,__UpperCamelCase=7 ,__UpperCamelCase=False ,__UpperCamelCase=True ,__UpperCamelCase=False ,__UpperCamelCase=True ,__UpperCamelCase=33 ,__UpperCamelCase=32 ,__UpperCamelCase=5 ,__UpperCamelCase=4 ,__UpperCamelCase=37 ,__UpperCamelCase="gelu" ,__UpperCamelCase=0.1 ,__UpperCamelCase=0.1 ,__UpperCamelCase=512 ,__UpperCamelCase=16 ,__UpperCamelCase=2 ,__UpperCamelCase=0.02 ,__UpperCamelCase=3 ,__UpperCamelCase=4 ,__UpperCamelCase=None ,) -> List[Any]:
'''simple docstring'''
lowercase_ : Any = parent
lowercase_ : str = batch_size
lowercase_ : List[Any] = seq_length
lowercase_ : Dict = is_training
lowercase_ : Tuple = use_input_mask
lowercase_ : Optional[Any] = use_token_type_ids
lowercase_ : List[str] = use_labels
lowercase_ : Any = vocab_size
lowercase_ : List[str] = hidden_size
lowercase_ : Optional[int] = num_hidden_layers
lowercase_ : int = num_attention_heads
lowercase_ : int = intermediate_size
lowercase_ : List[Any] = hidden_act
lowercase_ : Optional[int] = hidden_dropout_prob
lowercase_ : Tuple = attention_probs_dropout_prob
lowercase_ : Tuple = max_position_embeddings
lowercase_ : Optional[int] = type_vocab_size
lowercase_ : Optional[int] = type_sequence_label_size
lowercase_ : Dict = initializer_range
lowercase_ : int = num_labels
lowercase_ : Any = num_choices
lowercase_ : int = scope
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
lowercase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
lowercase_ : Dict = None
if self.use_input_mask:
lowercase_ : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowercase_ : Tuple = None
lowercase_ : Tuple = None
lowercase_ : Tuple = None
if self.use_labels:
lowercase_ : List[Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
lowercase_ : str = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
lowercase_ : int = ids_tensor([self.batch_size] ,self.num_choices )
lowercase_ : str = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
return EsmConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,pad_token_id=1 ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,)
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Tuple:
'''simple docstring'''
lowercase_ : List[Any] = EsmModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowercase_ : Tuple = model(__UpperCamelCase ,attention_mask=__UpperCamelCase )
lowercase_ : Union[str, Any] = model(__UpperCamelCase )
lowercase_ : int = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : Dict = EsmForMaskedLM(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowercase_ : int = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ,labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : str = self.num_labels
lowercase_ : int = EsmForTokenClassification(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowercase_ : List[Any] = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ,labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
lowercase_ : Any = self.prepare_config_and_inputs()
        lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = config_and_inputs
lowercase_ : Dict = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase ( lowercase_ , lowercase_ , unittest.TestCase ):
lowercase = False
lowercase = (
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
lowercase = ()
lowercase = (
{
'feature-extraction': EsmModel,
'fill-mask': EsmForMaskedLM,
'text-classification': EsmForSequenceClassification,
'token-classification': EsmForTokenClassification,
'zero-shot': EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase = True
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
lowercase_ : Dict = EsmModelTester(self )
lowercase_ : List[Any] = ConfigTester(self ,config_class=__UpperCamelCase ,hidden_size=37 )
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowercase_ : Optional[Any] = type
self.model_tester.create_and_check_model(*__UpperCamelCase )
def _UpperCAmelCase ( self ) -> int:
'''simple docstring'''
lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__UpperCamelCase )
def _UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__UpperCamelCase )
@slow
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ : List[str] = EsmModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def _UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()[0]
lowercase_ : str = EsmEmbeddings(config=__UpperCamelCase )
lowercase_ : Tuple = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
lowercase_ : List[Any] = torch.as_tensor(
[
[
0 + model.padding_idx + 1,
1 + model.padding_idx + 1,
2 + model.padding_idx + 1,
model.padding_idx,
]
] )
lowercase_ : Tuple = create_position_ids_from_input_ids(__UpperCamelCase ,model.padding_idx )
self.assertEqual(position_ids.shape ,expected_positions.shape )
self.assertTrue(torch.all(torch.eq(__UpperCamelCase ,__UpperCamelCase ) ) )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs()[0]
lowercase_ : List[Any] = EsmEmbeddings(config=__UpperCamelCase )
lowercase_ : List[Any] = torch.empty(2 ,4 ,30 )
lowercase_ : List[str] = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
lowercase_ : List[str] = torch.as_tensor([expected_single_positions, expected_single_positions] )
lowercase_ : List[str] = embeddings.create_position_ids_from_inputs_embeds(__UpperCamelCase )
self.assertEqual(position_ids.shape ,expected_positions.shape )
self.assertTrue(torch.all(torch.eq(__UpperCamelCase ,__UpperCamelCase ) ) )
@unittest.skip('Esm does not support embedding resizing' )
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
pass
@unittest.skip('Esm does not support embedding resizing' )
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
pass
@require_torch
class UpperCamelCase ( lowercase_ ):
@slow
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
with torch.no_grad():
lowercase_ : Any = EsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' )
model.eval()
lowercase_ : List[Any] = torch.tensor([[0, 1, 2, 3, 4, 5]] )
lowercase_ : List[str] = model(__UpperCamelCase )[0]
lowercase_ : Optional[int] = 33
lowercase_ : Union[str, Any] = torch.Size((1, 6, vocab_size) )
self.assertEqual(output.shape ,__UpperCamelCase )
lowercase_ : List[str] = torch.tensor(
[[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) )
@slow
def _UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
with torch.no_grad():
lowercase_ : int = EsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' )
model.eval()
lowercase_ : Tuple = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
lowercase_ : Dict = model(__UpperCamelCase )[0]
# compare the actual values for a slice.
lowercase_ : Any = torch.tensor(
[[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) )
| 321
| 0
|
"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def lowercase__( __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : int ):
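    # escape-time iteration z -> z**2 + c, with the real and imaginary parts carried in a and b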
lowercase_ : Optional[int] = x
lowercase_ : Tuple = y
for step in range(__a ): # noqa: B007
lowercase_ : Optional[Any] = a * a - b * b + x
lowercase_ : Optional[int] = 2 * a * b + y
lowercase_ : Union[str, Any] = a_new
        # divergence happens for all complex numbers with an absolute value
        # greater than 4
if a * a + b * b > 4:
break
return step / (max_step - 1)
def lowercase__( __SCREAMING_SNAKE_CASE : float ):
if distance == 1:
return (0, 0, 0)
else:
return (2_55, 2_55, 2_55)
def lowercase__( __SCREAMING_SNAKE_CASE : float ):
if distance == 1:
return (0, 0, 0)
else:
return tuple(round(i * 2_55 ) for i in colorsys.hsv_to_rgb(__a , 1 , 1 ) )
def lowercase__( __SCREAMING_SNAKE_CASE : int = 8_00 , __SCREAMING_SNAKE_CASE : int = 6_00 , __SCREAMING_SNAKE_CASE : float = -0.6 , __SCREAMING_SNAKE_CASE : float = 0 , __SCREAMING_SNAKE_CASE : float = 3.2 , __SCREAMING_SNAKE_CASE : int = 50 , __SCREAMING_SNAKE_CASE : bool = True , ):
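    # build the image pixel by pixel: translate pixel coordinates into figure coordinates and color each pixel by its escape-time distance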
lowercase_ : List[Any] = Image.new('RGB' , (image_width, image_height) )
lowercase_ : List[Any] = img.load()
# loop through the image-coordinates
for image_x in range(__a ):
for image_y in range(__a ):
# determine the figure-coordinates based on the image-coordinates
lowercase_ : int = figure_width / image_width * image_height
lowercase_ : int = figure_center_x + (image_x / image_width - 0.5) * figure_width
lowercase_ : Dict = figure_center_y + (image_y / image_height - 0.5) * figure_height
lowercase_ : Tuple = get_distance(__a , __a , __a )
# color the corresponding pixel based on the selected coloring-function
if use_distance_color_coding:
lowercase_ : int = get_color_coded_rgb(__a )
else:
lowercase_ : Any = get_black_and_white_rgb(__a )
return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
__SCREAMING_SNAKE_CASE =get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 350
|
"""simple docstring"""
import pickle
import numpy as np
from matplotlib import pyplot as plt
class UpperCamelCase :
def __init__( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=0.2 ,__UpperCamelCase=0.2 ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : Optional[int] = bp_numa
lowercase_ : Dict = bp_numa
lowercase_ : Tuple = bp_numa
lowercase_ : List[Any] = conva_get[:2]
lowercase_ : int = conva_get[2]
lowercase_ : Dict = size_pa
lowercase_ : int = rate_w
lowercase_ : Union[str, Any] = rate_t
lowercase_ : Dict = [
np.mat(-1 * np.random.rand(self.conva[0] ,self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
lowercase_ : Union[str, Any] = np.mat(-1 * np.random.rand(self.num_bpa ,self.num_bpa ) + 0.5 )
lowercase_ : Union[str, Any] = np.mat(-1 * np.random.rand(self.num_bpa ,self.num_bpa ) + 0.5 )
lowercase_ : str = -2 * np.random.rand(self.conva[1] ) + 1
lowercase_ : Tuple = -2 * np.random.rand(self.num_bpa ) + 1
lowercase_ : Union[str, Any] = -2 * np.random.rand(self.num_bpa ) + 1
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
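        # collect every weight, threshold and hyper-parameter into a dict and pickle it to the given path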
lowercase_ : int = {
'num_bp1': self.num_bpa,
'num_bp2': self.num_bpa,
'num_bp3': self.num_bpa,
'conv1': self.conva,
'step_conv1': self.step_conva,
'size_pooling1': self.size_poolinga,
'rate_weight': self.rate_weight,
'rate_thre': self.rate_thre,
'w_conv1': self.w_conva,
'wkj': self.wkj,
'vji': self.vji,
'thre_conv1': self.thre_conva,
'thre_bp2': self.thre_bpa,
'thre_bp3': self.thre_bpa,
}
with open(__UpperCamelCase ,'wb' ) as f:
pickle.dump(__UpperCamelCase ,__UpperCamelCase )
print(f'''Model saved: {save_path}''' )
@classmethod
def _UpperCAmelCase ( cls ,__UpperCamelCase ) -> List[Any]:
'''simple docstring'''
with open(__UpperCamelCase ,'rb' ) as f:
lowercase_ : Any = pickle.load(__UpperCamelCase ) # noqa: S301
lowercase_ : str = model_dic.get('conv1' )
conv_get.append(model_dic.get('step_conv1' ) )
lowercase_ : Union[str, Any] = model_dic.get('size_pooling1' )
lowercase_ : Optional[Any] = model_dic.get('num_bp1' )
lowercase_ : str = model_dic.get('num_bp2' )
lowercase_ : Optional[Any] = model_dic.get('num_bp3' )
lowercase_ : Union[str, Any] = model_dic.get('rate_weight' )
lowercase_ : Optional[int] = model_dic.get('rate_thre' )
# create model instance
lowercase_ : Any = CNN(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
        # modify model parameters
lowercase_ : Optional[Any] = model_dic.get('w_conv1' )
lowercase_ : Tuple = model_dic.get('wkj' )
lowercase_ : Union[str, Any] = model_dic.get('vji' )
lowercase_ : Optional[Any] = model_dic.get('thre_conv1' )
lowercase_ : Dict = model_dic.get('thre_bp2' )
lowercase_ : Optional[int] = model_dic.get('thre_bp3' )
return conv_ins
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Any:
'''simple docstring'''
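        # sigmoid activation: maps any real input into (0, 1)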
return 1 / (1 + np.exp(-1 * x ))
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
return round(__UpperCamelCase ,3 )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Tuple:
'''simple docstring'''
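        # slice the input into kernel-sized windows, convolve each window with every kernel (minus its threshold) and squash with the sigmoid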
lowercase_ : Dict = convs[0]
lowercase_ : Any = convs[1]
lowercase_ : Optional[Any] = np.shape(__UpperCamelCase )[0]
# get the data slice of original image data, data_focus
lowercase_ : Tuple = []
for i_focus in range(0 ,size_data - size_conv + 1 ,__UpperCamelCase ):
for j_focus in range(0 ,size_data - size_conv + 1 ,__UpperCamelCase ):
lowercase_ : List[Any] = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(__UpperCamelCase )
        # calculate the feature map of every single kernel, saved as a list of matrices
lowercase_ : Dict = []
lowercase_ : Dict = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(__UpperCamelCase ):
lowercase_ : Tuple = []
for i_focus in range(len(__UpperCamelCase ) ):
lowercase_ : Optional[int] = (
np.sum(np.multiply(data_focus[i_focus] ,w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(__UpperCamelCase ) )
lowercase_ : Optional[int] = np.asmatrix(__UpperCamelCase ).reshape(
__UpperCamelCase ,__UpperCamelCase )
data_featuremap.append(__UpperCamelCase )
        # expand the data slice to one dimension
lowercase_ : Optional[int] = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(__UpperCamelCase ) )
lowercase_ : str = np.asarray(__UpperCamelCase )
return focus_list, data_featuremap
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase="average_pool" ) -> Tuple:
'''simple docstring'''
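        # pool each feature map over non-overlapping windows, by average or maximum depending on pooling_type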
lowercase_ : Union[str, Any] = len(featuremaps[0] )
lowercase_ : str = int(size_map / size_pooling )
lowercase_ : Optional[int] = []
for i_map in range(len(__UpperCamelCase ) ):
lowercase_ : int = featuremaps[i_map]
lowercase_ : List[str] = []
for i_focus in range(0 ,__UpperCamelCase ,__UpperCamelCase ):
for j_focus in range(0 ,__UpperCamelCase ,__UpperCamelCase ):
lowercase_ : List[str] = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(__UpperCamelCase ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(__UpperCamelCase ) )
lowercase_ : Dict = np.asmatrix(__UpperCamelCase ).reshape(__UpperCamelCase ,__UpperCamelCase )
featuremap_pooled.append(__UpperCamelCase )
return featuremap_pooled
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Any:
'''simple docstring'''
lowercase_ : Tuple = []
for i in range(len(__UpperCamelCase ) ):
lowercase_ : Optional[Any] = np.shape(data[i] )
lowercase_ : List[str] = data[i].reshape(1 ,shapes[0] * shapes[1] )
lowercase_ : List[str] = data_listed.getA().tolist()[0]
data_expanded.extend(__UpperCamelCase )
lowercase_ : int = np.asarray(__UpperCamelCase )
return data_expanded
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> int:
'''simple docstring'''
lowercase_ : Any = np.asarray(__UpperCamelCase )
lowercase_ : Any = np.shape(__UpperCamelCase )
lowercase_ : Optional[Any] = data_mat.reshape(1 ,shapes[0] * shapes[1] )
return data_expanded
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> str:
'''simple docstring'''
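        # map the pooled-layer gradients back onto full-size feature maps and gate them with the sigmoid derivative out * (1 - out)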
lowercase_ : Any = []
lowercase_ : List[Any] = 0
for i_map in range(__UpperCamelCase ):
lowercase_ : List[str] = np.ones((size_map, size_map) )
for i in range(0 ,__UpperCamelCase ,__UpperCamelCase ):
for j in range(0 ,__UpperCamelCase ,__UpperCamelCase ):
lowercase_ : List[Any] = pd_pool[
i_pool
]
lowercase_ : Any = i_pool + 1
lowercase_ : Optional[int] = np.multiply(
__UpperCamelCase ,np.multiply(out_map[i_map] ,(1 - out_map[i_map]) ) )
pd_all.append(__UpperCamelCase )
return pd_all
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=bool ) -> Optional[int]:
'''simple docstring'''
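        # full training loop: forward pass through convolution, pooling and two BP layers, then backpropagate the error into every weight and threshold; repeat until the error target or the repeat budget is reached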
print('----------------------Start Training-------------------------' )
print((' - - Shape: Train_Data ', np.shape(__UpperCamelCase )) )
print((' - - Shape: Teach_Data ', np.shape(__UpperCamelCase )) )
lowercase_ : int = 0
lowercase_ : Tuple = []
lowercase_ : Tuple = 1_0000
while rp < n_repeat and mse >= error_accuracy:
lowercase_ : List[str] = 0
print(f'''-------------Learning Time {rp}--------------''' )
for p in range(len(__UpperCamelCase ) ):
# print('------------Learning Image: %d--------------'%p)
lowercase_ : int = np.asmatrix(datas_train[p] )
lowercase_ : Any = np.asarray(datas_teach[p] )
lowercase_ , lowercase_ : Tuple = self.convolute(
__UpperCamelCase ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,)
lowercase_ : Any = self.pooling(__UpperCamelCase ,self.size_poolinga )
lowercase_ : Optional[int] = np.shape(__UpperCamelCase )
lowercase_ : Optional[int] = self._expand(__UpperCamelCase )
lowercase_ : int = data_bp_input
lowercase_ : Tuple = np.dot(__UpperCamelCase ,self.vji.T ) - self.thre_bpa
lowercase_ : Dict = self.sig(__UpperCamelCase )
lowercase_ : int = np.dot(__UpperCamelCase ,self.wkj.T ) - self.thre_bpa
lowercase_ : int = self.sig(__UpperCamelCase )
                # --------------Model Learning ------------------------
# calculate error and gradient---------------
lowercase_ : str = np.multiply(
(data_teach - bp_outa) ,np.multiply(__UpperCamelCase ,(1 - bp_outa) ) )
lowercase_ : Optional[int] = np.multiply(
np.dot(__UpperCamelCase ,self.wkj ) ,np.multiply(__UpperCamelCase ,(1 - bp_outa) ) )
lowercase_ : Any = np.dot(__UpperCamelCase ,self.vji )
lowercase_ : str = pd_i_all / (self.size_poolinga * self.size_poolinga)
lowercase_ : Dict = pd_conva_pooled.T.getA().tolist()
lowercase_ : List[Any] = self._calculate_gradient_from_pool(
__UpperCamelCase ,__UpperCamelCase ,shape_featuremapa[0] ,shape_featuremapa[1] ,self.size_poolinga ,)
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
lowercase_ : Optional[Any] = self._expand_mat(pd_conva_all[k_conv] )
lowercase_ : Dict = self.rate_weight * np.dot(__UpperCamelCase ,__UpperCamelCase )
lowercase_ : List[Any] = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
lowercase_ : Dict = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
                # fully connected layer
lowercase_ : Optional[int] = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
lowercase_ : Any = self.vji + pd_j_all.T * bp_outa * self.rate_weight
lowercase_ : str = self.thre_bpa - pd_k_all * self.rate_thre
lowercase_ : Any = self.thre_bpa - pd_j_all * self.rate_thre
                # calculate the summed error over all single images
lowercase_ : List[Any] = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
lowercase_ : int = rp + 1
lowercase_ : Union[str, Any] = error_count / patterns
all_mse.append(__UpperCamelCase )
def draw_error():
lowercase_ : str = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(__UpperCamelCase ,'+-' )
plt.plot(__UpperCamelCase ,'r--' )
plt.xlabel('Learning Times' )
plt.ylabel('All_mse' )
plt.grid(__UpperCamelCase ,alpha=0.5 )
plt.show()
        print('------------------Training Completed---------------------' )
print((' - - Training epoch: ', rp, f''' - - Mse: {mse:.6f}''') )
if draw_e:
draw_error()
return mse
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
lowercase_ : Union[str, Any] = []
print('-------------------Start Testing-------------------------' )
print((' - - Shape: Test_Data ', np.shape(__UpperCamelCase )) )
for p in range(len(__UpperCamelCase ) ):
lowercase_ : List[Any] = np.asmatrix(datas_test[p] )
lowercase_ , lowercase_ : Optional[Any] = self.convolute(
__UpperCamelCase ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,)
lowercase_ : List[Any] = self.pooling(__UpperCamelCase ,self.size_poolinga )
lowercase_ : List[str] = self._expand(__UpperCamelCase )
lowercase_ : Any = data_bp_input
lowercase_ : Optional[Any] = bp_outa * self.vji.T - self.thre_bpa
lowercase_ : str = self.sig(__UpperCamelCase )
lowercase_ : List[str] = bp_outa * self.wkj.T - self.thre_bpa
lowercase_ : Optional[int] = self.sig(__UpperCamelCase )
produce_out.extend(bp_outa.getA().tolist() )
lowercase_ : List[str] = [list(map(self.do_round ,__UpperCamelCase ) ) for each in produce_out]
return np.asarray(__UpperCamelCase )
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[Any]:
'''simple docstring'''
lowercase_ : Optional[int] = np.asmatrix(__UpperCamelCase )
lowercase_ , lowercase_ : Union[str, Any] = self.convolute(
__UpperCamelCase ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,)
lowercase_ : Optional[int] = self.pooling(__UpperCamelCase ,self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
| 321
| 0
|
"""simple docstring"""
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
__SCREAMING_SNAKE_CASE ="0.12" # assumed parallelism: 8
@require_flax
@is_staging_test
class UpperCamelCase ( unittest.TestCase ):
@classmethod
def _UpperCAmelCase ( cls ) -> Tuple:
'''simple docstring'''
lowercase_ : str = TOKEN
HfFolder.save_token(lowercase_ )
@classmethod
def _UpperCAmelCase ( cls ) -> Dict:
'''simple docstring'''
try:
delete_repo(token=cls._token ,repo_id='test-model-flax' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='valid_org/test-model-flax-org' )
except HTTPError:
pass
def _UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : Dict = BertConfig(
vocab_size=99 ,hidden_size=32 ,num_hidden_layers=5 ,num_attention_heads=4 ,intermediate_size=37 )
lowercase_ : int = FlaxBertModel(lowercase_ )
model.push_to_hub('test-model-flax' ,use_auth_token=self._token )
lowercase_ : Dict = FlaxBertModel.from_pretrained(f'''{USER}/test-model-flax''' )
lowercase_ : List[Any] = flatten_dict(unfreeze(model.params ) )
lowercase_ : Union[str, Any] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
lowercase_ : Any = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowercase_ ,1e-3 ,msg=f'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token ,repo_id='test-model-flax' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(lowercase_ ,repo_id='test-model-flax' ,push_to_hub=lowercase_ ,use_auth_token=self._token )
lowercase_ : Tuple = FlaxBertModel.from_pretrained(f'''{USER}/test-model-flax''' )
lowercase_ : Optional[Any] = flatten_dict(unfreeze(model.params ) )
lowercase_ : Optional[Any] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
lowercase_ : int = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowercase_ ,1e-3 ,msg=f'''{key} not identical''' )
def _UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
lowercase_ : Tuple = BertConfig(
vocab_size=99 ,hidden_size=32 ,num_hidden_layers=5 ,num_attention_heads=4 ,intermediate_size=37 )
lowercase_ : Union[str, Any] = FlaxBertModel(lowercase_ )
model.push_to_hub('valid_org/test-model-flax-org' ,use_auth_token=self._token )
lowercase_ : Union[str, Any] = FlaxBertModel.from_pretrained('valid_org/test-model-flax-org' )
lowercase_ : Any = flatten_dict(unfreeze(model.params ) )
lowercase_ : List[Any] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
lowercase_ : Dict = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowercase_ ,1e-3 ,msg=f'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token ,repo_id='valid_org/test-model-flax-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
lowercase_ ,repo_id='valid_org/test-model-flax-org' ,push_to_hub=lowercase_ ,use_auth_token=self._token )
lowercase_ : Any = FlaxBertModel.from_pretrained('valid_org/test-model-flax-org' )
lowercase_ : int = flatten_dict(unfreeze(model.params ) )
lowercase_ : Dict = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
lowercase_ : Union[str, Any] = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowercase_ ,1e-3 ,msg=f'''{key} not identical''' )
def lowercase__( __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Optional[int] ):
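    # two Flax models count as equal only if every flattened parameter differs by at most 1e-4 in summed absolute value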
lowercase_ : Optional[int] = True
lowercase_ : Union[str, Any] = flatten_dict(modela.params )
lowercase_ : Union[str, Any] = flatten_dict(modela.params )
for key in flat_params_a.keys():
if np.sum(np.abs(flat_params_a[key] - flat_params_a[key] ) ) > 1E-4:
lowercase_ : int = False
return models_are_equal
@require_flax
class UpperCamelCase ( unittest.TestCase ):
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
lowercase_ : Optional[int] = BertConfig.from_pretrained('hf-internal-testing/tiny-bert-flax-only' )
lowercase_ : Any = FlaxBertModel(lowercase_ )
lowercase_ : Optional[Any] = "bert"
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(lowercase_ ,lowercase_ ) )
with self.assertRaises(lowercase_ ):
lowercase_ : Any = FlaxBertModel.from_pretrained(lowercase_ )
lowercase_ : int = FlaxBertModel.from_pretrained(lowercase_ ,subfolder=lowercase_ )
self.assertTrue(check_models_equal(lowercase_ ,lowercase_ ) )
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
lowercase_ : Union[str, Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-bert-flax-only' )
lowercase_ : int = FlaxBertModel(lowercase_ )
lowercase_ : Tuple = "bert"
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(lowercase_ ,lowercase_ ) ,max_shard_size='10KB' )
with self.assertRaises(lowercase_ ):
lowercase_ : Optional[Any] = FlaxBertModel.from_pretrained(lowercase_ )
lowercase_ : Tuple = FlaxBertModel.from_pretrained(lowercase_ ,subfolder=lowercase_ )
self.assertTrue(check_models_equal(lowercase_ ,lowercase_ ) )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : List[Any] = "bert"
lowercase_ : List[str] = "hf-internal-testing/tiny-random-bert-subfolder"
with self.assertRaises(lowercase_ ):
lowercase_ : Optional[Any] = FlaxBertModel.from_pretrained(lowercase_ )
lowercase_ : int = FlaxBertModel.from_pretrained(lowercase_ ,subfolder=lowercase_ )
self.assertIsNotNone(lowercase_ )
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
lowercase_ : Optional[Any] = "bert"
lowercase_ : str = "hf-internal-testing/tiny-random-bert-sharded-subfolder"
with self.assertRaises(lowercase_ ):
lowercase_ : Optional[int] = FlaxBertModel.from_pretrained(lowercase_ )
lowercase_ : Optional[int] = FlaxBertModel.from_pretrained(lowercase_ ,subfolder=lowercase_ )
self.assertIsNotNone(lowercase_ )
| 351
|
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class UpperCamelCase ( unittest.TestCase ):
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]:
'''simple docstring'''
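        # every (batch_size, sequence_length) cell produced by the benchmark run must contain a result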
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['bs'] ,model_result['ss'] ):
lowercase_ : Dict = model_result['result'][batch_size][sequence_length]
self.assertIsNotNone(__UpperCamelCase )
def _UpperCAmelCase ( self ) -> int:
'''simple docstring'''
lowercase_ : int = 'sshleifer/tiny-gpt2'
lowercase_ : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,eager_mode=__UpperCamelCase ,multi_process=__UpperCamelCase ,)
lowercase_ : Union[str, Any] = TensorFlowBenchmark(__UpperCamelCase )
lowercase_ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
lowercase_ : List[str] = 'sgugger/tiny-distilbert-classification'
lowercase_ : Dict = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,only_pretrain_model=__UpperCamelCase ,)
lowercase_ : int = TensorFlowBenchmark(__UpperCamelCase )
lowercase_ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ : Any = 'sshleifer/tiny-gpt2'
lowercase_ : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,)
lowercase_ : Optional[Any] = TensorFlowBenchmark(__UpperCamelCase )
lowercase_ : int = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ : Dict = 'sshleifer/tiny-gpt2'
lowercase_ : Tuple = AutoConfig.from_pretrained(__UpperCamelCase )
lowercase_ : str = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,eager_mode=__UpperCamelCase ,multi_process=__UpperCamelCase ,)
lowercase_ : str = TensorFlowBenchmark(__UpperCamelCase ,[config] )
lowercase_ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
lowercase_ : Any = 'sshleifer/tiny-gpt2'
lowercase_ : Any = AutoConfig.from_pretrained(__UpperCamelCase )
lowercase_ : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,)
lowercase_ : int = TensorFlowBenchmark(__UpperCamelCase ,[config] )
lowercase_ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : int = 'sshleifer/tiny-gpt2'
lowercase_ : List[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,)
lowercase_ : List[str] = TensorFlowBenchmark(__UpperCamelCase )
lowercase_ : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
lowercase_ : List[str] = 'sshleifer/tiny-gpt2'
lowercase_ : Optional[int] = AutoConfig.from_pretrained(__UpperCamelCase )
lowercase_ : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,)
lowercase_ : str = TensorFlowBenchmark(__UpperCamelCase ,[config] )
lowercase_ : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
lowercase_ : str = 'patrickvonplaten/t5-tiny-random'
lowercase_ : int = AutoConfig.from_pretrained(__UpperCamelCase )
lowercase_ : Optional[int] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,)
lowercase_ : List[str] = TensorFlowBenchmark(__UpperCamelCase ,configs=[config] )
lowercase_ : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('GPU' ) ) == 0 ,'Cannot do xla on CPU.' )
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ : Optional[int] = 'sshleifer/tiny-gpt2'
lowercase_ : Union[str, Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,use_xla=__UpperCamelCase ,multi_process=__UpperCamelCase ,)
lowercase_ : Union[str, Any] = TensorFlowBenchmark(__UpperCamelCase )
lowercase_ : int = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
lowercase_ : List[str] = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase_ : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,inference=__UpperCamelCase ,save_to_csv=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,inference_time_csv_file=os.path.join(__UpperCamelCase ,'inf_time.csv' ) ,inference_memory_csv_file=os.path.join(__UpperCamelCase ,'inf_mem.csv' ) ,env_info_csv_file=os.path.join(__UpperCamelCase ,'env.csv' ) ,multi_process=__UpperCamelCase ,)
lowercase_ : List[str] = TensorFlowBenchmark(__UpperCamelCase )
benchmark.run()
self.assertTrue(Path(os.path.join(__UpperCamelCase ,'inf_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(__UpperCamelCase ,'inf_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(__UpperCamelCase ,'env.csv' ) ).exists() )
def _UpperCAmelCase ( self ) -> int:
'''simple docstring'''
lowercase_ : int = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(__UpperCamelCase ):
self.assertTrue(hasattr(__UpperCamelCase ,'sequential' ) )
self.assertTrue(hasattr(__UpperCamelCase ,'cumulative' ) )
self.assertTrue(hasattr(__UpperCamelCase ,'current' ) )
self.assertTrue(hasattr(__UpperCamelCase ,'total' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase_ : Dict = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,log_filename=os.path.join(__UpperCamelCase ,'log.txt' ) ,log_print=__UpperCamelCase ,trace_memory_line_by_line=__UpperCamelCase ,eager_mode=__UpperCamelCase ,multi_process=__UpperCamelCase ,)
lowercase_ : Dict = TensorFlowBenchmark(__UpperCamelCase )
lowercase_ : Any = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(__UpperCamelCase ,'log.txt' ) ).exists() )
| 321
| 0
|
"""simple docstring"""
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
__SCREAMING_SNAKE_CASE =[
"""EAGER""",
"""AOT_EAGER""",
"""INDUCTOR""",
"""NVFUSER""",
"""AOT_NVFUSER""",
"""AOT_CUDAGRAPHS""",
"""OFI""",
"""FX2TRT""",
"""ONNXRT""",
"""IPEX""",
]
def lowercase__( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : List[str]=None , __SCREAMING_SNAKE_CASE : List[Any]=None , __SCREAMING_SNAKE_CASE : Optional[Any]=None ):
"""simple docstring"""
lowercase_ : List[Any] = True
while ask_again:
lowercase_ : str = input(_snake_case )
try:
if default is not None and len(_snake_case ) == 0:
return default
return convert_value(_snake_case ) if convert_value is not None else result
except Exception:
if error_message is not None:
print(_snake_case )
def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : List[Any]=[] , __SCREAMING_SNAKE_CASE : str=None , __SCREAMING_SNAKE_CASE : Optional[Any]=0 ):
"""simple docstring"""
lowercase_ : Union[str, Any] = BulletMenu(_snake_case , _snake_case )
lowercase_ : List[str] = menu.run(default_choice=_snake_case )
return convert_value(_snake_case ) if convert_value is not None else result
def lowercase__( __SCREAMING_SNAKE_CASE : Dict ):
"""simple docstring"""
lowercase_ : Union[str, Any] = int(_snake_case )
return ComputeEnvironment(['LOCAL_MACHINE', 'AMAZON_SAGEMAKER'][value] )
def lowercase__( __SCREAMING_SNAKE_CASE : Any ):
"""simple docstring"""
lowercase_ : int = int(_snake_case )
return DistributedType(['NO', 'MULTI_CPU', 'MULTI_XPU', 'MULTI_GPU', 'MULTI_NPU', 'TPU'][value] )
def lowercase__( __SCREAMING_SNAKE_CASE : List[Any] ):
"""simple docstring"""
lowercase_ : Optional[int] = int(_snake_case )
return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def lowercase__( __SCREAMING_SNAKE_CASE : Tuple ):
"""simple docstring"""
lowercase_ : Any = int(_snake_case )
return PrecisionType(['no', 'fp16', 'bf16', 'fp8'][value] )
def lowercase__( __SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
lowercase_ : Any = int(_snake_case )
return SageMakerDistributedType(['NO', 'DATA_PARALLEL', 'MODEL_PARALLEL'][value] )
def lowercase__( __SCREAMING_SNAKE_CASE : Union[str, Any] ):
"""simple docstring"""
return {"yes": True, "no": False}[value.lower()]
class UpperCamelCase ( argparse.RawDescriptionHelpFormatter ):
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> List[Any]:
'''simple docstring'''
lowercase_ : Union[str, Any] = super()._format_usage(a_ ,a_ ,a_ ,a_ )
lowercase_ : Optional[Any] = usage.replace('<command> [<args>] ' ,'' )
return usage
| 352
|
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
class UpperCamelCase ( lowercase_ ):
lowercase = ['input_values', 'padding_mask']
def __init__( self ,__UpperCamelCase = 1 ,__UpperCamelCase = 2_4000 ,__UpperCamelCase = 0.0 ,__UpperCamelCase = None ,__UpperCamelCase = None ,**__UpperCamelCase ,) -> Any:
'''simple docstring'''
super().__init__(feature_size=__UpperCamelCase ,sampling_rate=__UpperCamelCase ,padding_value=__UpperCamelCase ,**__UpperCamelCase )
lowercase_ : List[str] = chunk_length_s
lowercase_ : Tuple = overlap
@property
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
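        # number of audio samples per chunk, derived from the chunk duration in seconds and the sampling rate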
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
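        # hop between successive chunks; an overlap of 0.0 yields back-to-back chunks of exactly chunk_length samples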
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 ,int((1.0 - self.overlap) * self.chunk_length ) )
def __call__( self ,__UpperCamelCase ,__UpperCamelCase = None ,__UpperCamelCase = False ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,) -> BatchFeature:
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
f''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'''
f''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
if padding and truncation:
raise ValueError('Both padding and truncation were set. Make sure you only set one.' )
elif padding is None:
# by default let's pad the inputs
lowercase_ : Optional[int] = True
lowercase_ : Optional[int] = bool(
isinstance(__UpperCamelCase ,(list, tuple) ) and (isinstance(raw_audio[0] ,(np.ndarray, tuple, list) )) )
if is_batched:
lowercase_ : int = [np.asarray(__UpperCamelCase ,dtype=np.floataa ).T for audio in raw_audio]
elif not is_batched and not isinstance(__UpperCamelCase ,np.ndarray ):
lowercase_ : Any = np.asarray(__UpperCamelCase ,dtype=np.floataa )
elif isinstance(__UpperCamelCase ,np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ):
lowercase_ : List[str] = raw_audio.astype(np.floataa )
# always return batch
if not is_batched:
lowercase_ : Dict = [np.asarray(__UpperCamelCase ).T]
# verify inputs are valid
for idx, example in enumerate(__UpperCamelCase ):
if example.ndim > 2:
raise ValueError(f'''Expected input shape (channels, length) but got shape {example.shape}''' )
if self.feature_size == 1 and example.ndim != 1:
raise ValueError(f'''Expected mono audio but example has {example.shape[-1]} channels''' )
if self.feature_size == 2 and example.shape[-1] != 2:
raise ValueError(f'''Expected stereo audio but example has {example.shape[-1]} channels''' )
lowercase_ : Optional[int] = None
lowercase_ : List[Any] = BatchFeature({'input_values': raw_audio} )
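        # chunked mode below: truncation crops every example to the shortest length rounded down to whole strides, while padding extends to the longest length rounded up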
if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
if truncation:
lowercase_ : List[Any] = min(array.shape[0] for array in raw_audio )
lowercase_ : int = int(np.floor(max_length / self.chunk_stride ) )
lowercase_ : Dict = (nb_step - 1) * self.chunk_stride + self.chunk_length
elif padding:
lowercase_ : List[Any] = max(array.shape[0] for array in raw_audio )
lowercase_ : Tuple = int(np.ceil(max_length / self.chunk_stride ) )
lowercase_ : List[str] = (nb_step - 1) * self.chunk_stride + self.chunk_length
lowercase_ : Union[str, Any] = 'max_length'
else:
lowercase_ : int = input_values
# normal padding on batch
if padded_inputs is None:
lowercase_ : int = self.pad(
__UpperCamelCase ,max_length=__UpperCamelCase ,truncation=__UpperCamelCase ,padding=__UpperCamelCase ,return_attention_mask=__UpperCamelCase ,)
if padding:
lowercase_ : Optional[int] = padded_inputs.pop('attention_mask' )
lowercase_ : Dict = []
for example in padded_inputs.pop('input_values' ):
if self.feature_size == 1:
lowercase_ : Optional[int] = example[..., None]
input_values.append(example.T )
lowercase_ : str = input_values
if return_tensors is not None:
lowercase_ : List[Any] = padded_inputs.convert_to_tensors(__UpperCamelCase )
return padded_inputs
| 321
| 0
|
"""simple docstring"""
from __future__ import annotations
import queue
class UpperCamelCase :
"""simple docstring"""
def __init__( self ,__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
lowercase_ : int = data
lowercase_ : str = None
lowercase_ : Union[str, Any] = None
def lowercase__( ):
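    # build the tree level by level from user input; answering 'n' (the default) stops and returns the tree built so far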
print('\n********Press N to stop entering at any point of time********\n' )
lowercase_ : Union[str, Any] = input('Enter the value of the root node: ' ).strip().lower()
lowercase_ : queue.Queue = queue.Queue()
lowercase_ : Optional[int] = TreeNode(int(_A ) )
q.put(_A )
while not q.empty():
lowercase_ : str = q.get()
lowercase_ : Any = F'''Enter the left node of {node_found.data}: '''
lowercase_ : Optional[Any] = input(_A ).strip().lower() or 'n'
if check == "n":
return tree_node
lowercase_ : Optional[int] = TreeNode(int(_A ) )
lowercase_ : Optional[int] = left_node
q.put(_A )
lowercase_ : List[Any] = F'''Enter the right node of {node_found.data}: '''
lowercase_ : int = input(_A ).strip().lower() or 'n'
if check == "n":
return tree_node
lowercase_ : int = TreeNode(int(_A ) )
lowercase_ : Union[str, Any] = right_node
q.put(_A )
raise
def lowercase__( __SCREAMING_SNAKE_CASE : Union[str, Any] ):
if not isinstance(_A , _A ) or not node:
return
print(node.data , end=',' )
pre_order(node.left )
pre_order(node.right )
def lowercase__( __SCREAMING_SNAKE_CASE : List[str] ):
if not isinstance(_A , _A ) or not node:
return
in_order(node.left )
print(node.data , end=',' )
in_order(node.right )
def lowercase__( __SCREAMING_SNAKE_CASE : Any ):
if not isinstance(_A , _A ) or not node:
return
post_order(node.left )
post_order(node.right )
print(node.data , end=',' )
def lowercase__( __SCREAMING_SNAKE_CASE : int ):
if not isinstance(_A , _A ) or not node:
return
lowercase_ : queue.Queue = queue.Queue()
q.put(_A )
while not q.empty():
lowercase_ : str = q.get()
print(node_dequeued.data , end=',' )
if node_dequeued.left:
q.put(node_dequeued.left )
if node_dequeued.right:
q.put(node_dequeued.right )
def lowercase__( __SCREAMING_SNAKE_CASE : Any ):
if not isinstance(_A , _A ) or not node:
return
lowercase_ : queue.Queue = queue.Queue()
q.put(_A )
while not q.empty():
lowercase_ : Dict = []
while not q.empty():
lowercase_ : List[Any] = q.get()
print(node_dequeued.data , end=',' )
if node_dequeued.left:
list_.append(node_dequeued.left )
if node_dequeued.right:
list_.append(node_dequeued.right )
print()
for node in list_:
q.put(_A )
def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] ):
if not isinstance(_A , _A ) or not node:
return
lowercase_ : list[TreeNode] = []
lowercase_ : int = node
while n or stack:
while n: # start from root node, find its left child
print(n.data , end=',' )
stack.append(_A )
lowercase_ : Union[str, Any] = n.left
# end of while means current node doesn't have left child
lowercase_ : Optional[Any] = stack.pop()
# start to traverse its right child
lowercase_ : List[Any] = n.right
def lowercase__( __SCREAMING_SNAKE_CASE : List[Any] ):
if not isinstance(_A , _A ) or not node:
return
lowercase_ : list[TreeNode] = []
lowercase_ : Union[str, Any] = node
while n or stack:
while n:
stack.append(_A )
lowercase_ : Optional[int] = n.left
lowercase_ : str = stack.pop()
print(n.data , end=',' )
lowercase_ : Union[str, Any] = n.right
def lowercase__( __SCREAMING_SNAKE_CASE : Union[str, Any] ):
if not isinstance(_A , _A ) or not node:
return
    lowercase_ , lowercase_ : str = [], []
lowercase_ : List[Any] = node
stacka.append(_A )
while stacka: # to find the reversed order of post order, store it in stack2
lowercase_ : List[Any] = stacka.pop()
if n.left:
stacka.append(n.left )
if n.right:
stacka.append(n.right )
stacka.append(_A )
while stacka: # pop up from stack2 will be the post order
print(stacka.pop().data , end=',' )
def lowercase__( __SCREAMING_SNAKE_CASE : Dict = "" , __SCREAMING_SNAKE_CASE : Optional[int]=50 , __SCREAMING_SNAKE_CASE : Dict="*" ):
if not s:
return "\n" + width * char
    lowercase_ , lowercase_ : str = divmod(width - len(_A ) - 2 , 2 )
return F'''{left * char} {s} {(left + extra) * char}'''
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("Binary Tree Traversals"))
__SCREAMING_SNAKE_CASE =build_tree()
print(prompt("Pre Order Traversal"))
pre_order(node)
print(prompt() + "\n")
print(prompt("In Order Traversal"))
in_order(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal"))
post_order(node)
print(prompt() + "\n")
print(prompt("Level Order Traversal"))
level_order(node)
print(prompt() + "\n")
print(prompt("Actual Level Order Traversal"))
level_order_actual(node)
print("*" * 50 + "\n")
print(prompt("Pre Order Traversal - Iteration Version"))
pre_order_iter(node)
print(prompt() + "\n")
print(prompt("In Order Traversal - Iteration Version"))
in_order_iter(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal - Iteration Version"))
post_order_iter(node)
print(prompt())
| 353
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__SCREAMING_SNAKE_CASE ={"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE =[
"MRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"MraForMaskedLM",
"MraForMultipleChoice",
"MraForQuestionAnswering",
"MraForSequenceClassification",
"MraForTokenClassification",
"MraLayer",
"MraModel",
"MraPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE =_LazyModule(__name__, globals()["__file__"], _import_structure)
| 321
| 0
|
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def lowercase__( __SCREAMING_SNAKE_CASE : List[Any] ):
lowercase_ : Optional[int] = [
'encoder.version',
'decoder.version',
'model.encoder.version',
'model.decoder.version',
'_float_tensor',
'decoder.output_projection.weight',
]
for k in ignore_keys:
state_dict.pop(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def lowercase__( __SCREAMING_SNAKE_CASE : List[str] ):
lowercase_ , lowercase_ : Dict = emb.weight.shape
lowercase_ : List[Any] = nn.Linear(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , bias=__SCREAMING_SNAKE_CASE )
lowercase_ : Dict = emb.weight.data
return lin_layer
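# Note on make_linear_from_emb above: it ties the LM head to the embedding
# matrix by reusing emb.weight.data inside a bias-free Linear. The Linear is
# only a container here; its weight tensor is replaced outright, which is the
# standard weight-tying trick for seq2seq output projections.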
def lowercase__( __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[int]="facebook/mbart-large-en-ro" , __SCREAMING_SNAKE_CASE : str=False , __SCREAMING_SNAKE_CASE : str=False ):
lowercase_ : Optional[Any] = torch.load(__SCREAMING_SNAKE_CASE , map_location='cpu' )['model']
remove_ignore_keys_(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = state_dict['encoder.embed_tokens.weight'].shape[0]
lowercase_ : Tuple = MBartConfig.from_pretrained(__SCREAMING_SNAKE_CASE , vocab_size=__SCREAMING_SNAKE_CASE )
if mbart_aa and finetuned:
lowercase_ : Union[str, Any] = 'relu'
lowercase_ : Dict = state_dict['decoder.embed_tokens.weight']
lowercase_ : str = MBartForConditionalGeneration(__SCREAMING_SNAKE_CASE )
model.model.load_state_dict(__SCREAMING_SNAKE_CASE )
if finetuned:
lowercase_ : str = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
)
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--hf_config",
default="facebook/mbart-large-cc25",
type=str,
help="Which huggingface architecture to use: mbart-large",
)
parser.add_argument("--mbart_50", action="store_true", help="whether the model is mMART-50 checkpoint")
parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
__SCREAMING_SNAKE_CASE =parser.parse_args()
__SCREAMING_SNAKE_CASE =convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_50
)
model.save_pretrained(args.pytorch_dump_folder_path)
| 354
|
"""simple docstring"""
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
__SCREAMING_SNAKE_CASE ="python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("importlib_metadata")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def lowercase__( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str=None ):
require_version(deps[pkg] , __SCREAMING_SNAKE_CASE )
| 321
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__SCREAMING_SNAKE_CASE ={
'''configuration_convnext''': ['''CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ConvNextConfig''', '''ConvNextOnnxConfig''']
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE =['''ConvNextFeatureExtractor''']
__SCREAMING_SNAKE_CASE =['''ConvNextImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE =[
'''CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConvNextForImageClassification''',
'''ConvNextModel''',
'''ConvNextPreTrainedModel''',
'''ConvNextBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE =[
'''TFConvNextForImageClassification''',
'''TFConvNextModel''',
'''TFConvNextPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
__SCREAMING_SNAKE_CASE =_LazyModule(__name__, globals()["__file__"], _import_structure)
| 355
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
def lowercase__( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Any=False , __SCREAMING_SNAKE_CASE : Dict=False ):
lowercase_ : int = 'backbone.' if is_semantic else ''
lowercase_ : List[str] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''{prefix}blocks.{i}.norm1.weight''', F'''beit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''{prefix}blocks.{i}.norm1.bias''', F'''beit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(F'''{prefix}blocks.{i}.attn.proj.weight''', F'''beit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(F'''{prefix}blocks.{i}.attn.proj.bias''', F'''beit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''{prefix}blocks.{i}.norm2.weight''', F'''beit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''{prefix}blocks.{i}.norm2.bias''', F'''beit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc1.weight''', F'''beit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc1.bias''', F'''beit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc2.weight''', F'''beit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc2.bias''', F'''beit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
(F'''{prefix}cls_token''', 'beit.embeddings.cls_token'),
(F'''{prefix}patch_embed.proj.weight''', 'beit.embeddings.patch_embeddings.projection.weight'),
(F'''{prefix}patch_embed.proj.bias''', 'beit.embeddings.patch_embeddings.projection.bias'),
(F'''{prefix}pos_embed''', 'beit.embeddings.position_embeddings'),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
('mask_token', 'beit.embeddings.mask_token'),
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
('fc_norm.weight', 'beit.pooler.layernorm.weight'),
('fc_norm.bias', 'beit.pooler.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[str]=False , __SCREAMING_SNAKE_CASE : List[Any]=False ):
for i in range(config.num_hidden_layers ):
lowercase_ : Any = 'backbone.' if is_semantic else ''
# queries, keys and values
lowercase_ : List[Any] = state_dict.pop(F'''{prefix}blocks.{i}.attn.qkv.weight''' )
lowercase_ : List[Any] = state_dict.pop(F'''{prefix}blocks.{i}.attn.q_bias''' )
lowercase_ : int = state_dict.pop(F'''{prefix}blocks.{i}.attn.v_bias''' )
lowercase_ : List[str] = in_proj_weight[
: config.hidden_size, :
]
lowercase_ : List[str] = q_bias
lowercase_ : List[str] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowercase_ : Any = in_proj_weight[
-config.hidden_size :, :
]
lowercase_ : Any = v_bias
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
lowercase_ : Any = state_dict.pop(F'''{prefix}blocks.{i}.gamma_1''' )
lowercase_ : int = state_dict.pop(F'''{prefix}blocks.{i}.gamma_2''' )
lowercase_ : Tuple = gamma_a
lowercase_ : List[Any] = gamma_a
def lowercase__( __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any ):
lowercase_ : List[Any] = dct.pop(__SCREAMING_SNAKE_CASE )
lowercase_ : Any = val
def lowercase__( ):
lowercase_ : List[str] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
lowercase_ : Any = Image.open(requests.get(__SCREAMING_SNAKE_CASE , stream=__SCREAMING_SNAKE_CASE ).raw )
return im
@torch.no_grad()
def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Any=False ):
lowercase_ : List[str] = False if 'rvlcdip' in checkpoint_url else True
lowercase_ : Dict = BeitConfig(use_absolute_position_embeddings=__SCREAMING_SNAKE_CASE , use_mask_token=__SCREAMING_SNAKE_CASE )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
lowercase_ : Any = 10_24
lowercase_ : List[str] = 40_96
lowercase_ : Tuple = 24
lowercase_ : Union[str, Any] = 16
# labels
if "rvlcdip" in checkpoint_url:
lowercase_ : Optional[Any] = 16
lowercase_ : Any = 'huggingface/label-files'
lowercase_ : int = 'rvlcdip-id2label.json'
lowercase_ : Optional[int] = json.load(open(hf_hub_download(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) )
lowercase_ : Dict = {int(__SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
lowercase_ : str = idalabel
lowercase_ : str = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
lowercase_ : Dict = torch.hub.load_state_dict_from_url(__SCREAMING_SNAKE_CASE , map_location='cpu' )['model']
lowercase_ : Optional[Any] = create_rename_keys(__SCREAMING_SNAKE_CASE , has_lm_head=__SCREAMING_SNAKE_CASE )
for src, dest in rename_keys:
rename_key(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
read_in_q_k_v(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , has_lm_head=__SCREAMING_SNAKE_CASE )
# load HuggingFace model
lowercase_ : Optional[int] = BeitForMaskedImageModeling(__SCREAMING_SNAKE_CASE ) if has_lm_head else BeitForImageClassification(__SCREAMING_SNAKE_CASE )
model.eval()
model.load_state_dict(__SCREAMING_SNAKE_CASE )
# Check outputs on an image
lowercase_ : List[Any] = BeitImageProcessor(
size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=__SCREAMING_SNAKE_CASE )
lowercase_ : str = prepare_img()
lowercase_ : Optional[Any] = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='pt' )
lowercase_ : int = encoding['pixel_values']
lowercase_ : Any = model(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = outputs.logits
# verify logits
lowercase_ : Optional[Any] = [1, 16] if 'rvlcdip' in checkpoint_url else [1, 1_96, 81_92]
assert logits.shape == torch.Size(__SCREAMING_SNAKE_CASE ), "Shape of logits not as expected"
Path(__SCREAMING_SNAKE_CASE ).mkdir(exist_ok=__SCREAMING_SNAKE_CASE )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(__SCREAMING_SNAKE_CASE )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__SCREAMING_SNAKE_CASE )
if push_to_hub:
if has_lm_head:
lowercase_ : List[str] = 'dit-base' if 'base' in checkpoint_url else 'dit-large'
else:
lowercase_ : List[str] = 'dit-base-finetuned-rvlcdip' if 'dit-b' in checkpoint_url else 'dit-large-finetuned-rvlcdip'
image_processor.push_to_hub(
repo_path_or_name=Path(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=__SCREAMING_SNAKE_CASE , )
model.push_to_hub(
repo_path_or_name=Path(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=__SCREAMING_SNAKE_CASE , )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
type=str,
help="URL to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
__SCREAMING_SNAKE_CASE =parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 321
| 0
|
"""simple docstring"""
import argparse
import torch
from transformers import GPTaLMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser(
description=(
"Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"
" Distillation"
)
)
parser.add_argument("--model_type", default="roberta", choices=["roberta", "gpt2"])
parser.add_argument("--model_name", default="roberta-large", type=str)
parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_roberta_048131723.pth", type=str)
parser.add_argument("--vocab_transform", action="store_true")
__SCREAMING_SNAKE_CASE = parser.parse_args()
if args.model_type == "roberta":
__SCREAMING_SNAKE_CASE = RobertaForMaskedLM.from_pretrained(args.model_name)
__SCREAMING_SNAKE_CASE = "roberta"
elif args.model_type == "gpt2":
__SCREAMING_SNAKE_CASE = GPTaLMHeadModel.from_pretrained(args.model_name)
__SCREAMING_SNAKE_CASE = "transformer"
__SCREAMING_SNAKE_CASE = model.state_dict()
__SCREAMING_SNAKE_CASE = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
__SCREAMING_SNAKE_CASE = state_dict[F"{prefix}.{param_name}"]
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
__SCREAMING_SNAKE_CASE = F"{prefix}.embeddings.{w}.weight"
__SCREAMING_SNAKE_CASE = state_dict[param_name]
for w in ["weight", "bias"]:
__SCREAMING_SNAKE_CASE = F"{prefix}.embeddings.LayerNorm.{w}"
__SCREAMING_SNAKE_CASE = state_dict[param_name]
# Transformer Blocks #
__SCREAMING_SNAKE_CASE = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
if args.model_type == "gpt2":
for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
for w in ["weight", "bias"]:
__SCREAMING_SNAKE_CASE = state_dict[
F"{prefix}.h.{teacher_idx}.{layer}.{w}"
]
__SCREAMING_SNAKE_CASE = state_dict[F"{prefix}.h.{teacher_idx}.attn.bias"]
else:
for layer in [
"attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.output.dense",
"attention.output.LayerNorm",
"intermediate.dense",
"output.dense",
"output.LayerNorm",
]:
for w in ["weight", "bias"]:
__SCREAMING_SNAKE_CASE = state_dict[
F"{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"
]
std_idx += 1
    # Language Modeling Head #
if args.model_type == "roberta":
for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
__SCREAMING_SNAKE_CASE = state_dict[F"{layer}"]
if args.vocab_transform:
for w in ["weight", "bias"]:
__SCREAMING_SNAKE_CASE = state_dict[F"lm_head.dense.{w}"]
__SCREAMING_SNAKE_CASE = state_dict[F"lm_head.layer_norm.{w}"]
elif args.model_type == "gpt2":
for w in ["weight", "bias"]:
__SCREAMING_SNAKE_CASE = state_dict[F"{prefix}.ln_f.{w}"]
__SCREAMING_SNAKE_CASE = state_dict["lm_head.weight"]
print(F"N layers selected for distillation: {std_idx}")
print(F"Number of params transferred for distillation: {len(compressed_sd.keys())}")
print(F"Save transferred checkpoint to {args.dump_checkpoint}.")
torch.save(compressed_sd, args.dump_checkpoint)
| 356
|
"""simple docstring"""
__SCREAMING_SNAKE_CASE ={
"a": "AAAAA",
"b": "AAAAB",
"c": "AAABA",
"d": "AAABB",
"e": "AABAA",
"f": "AABAB",
"g": "AABBA",
"h": "AABBB",
"i": "ABAAA",
"j": "BBBAA",
"k": "ABAAB",
"l": "ABABA",
"m": "ABABB",
"n": "ABBAA",
"o": "ABBAB",
"p": "ABBBA",
"q": "ABBBB",
"r": "BAAAA",
"s": "BAAAB",
"t": "BAABA",
"u": "BAABB",
"v": "BBBAB",
"w": "BABAA",
"x": "BABAB",
"y": "BABBA",
"z": "BABBB",
" ": " ",
}
__SCREAMING_SNAKE_CASE ={value: key for key, value in encode_dict.items()}
def lowercase__( __SCREAMING_SNAKE_CASE : str ):
lowercase_ : Union[str, Any] = ''
for letter in word.lower():
if letter.isalpha() or letter == " ":
encoded += encode_dict[letter]
else:
raise Exception('encode() accepts only letters of the alphabet and spaces' )
return encoded
def lowercase__( __SCREAMING_SNAKE_CASE : str ):
if set(__SCREAMING_SNAKE_CASE ) - {"A", "B", " "} != set():
raise Exception('decode() accepts only \'A\', \'B\' and spaces' )
lowercase_ : Dict = ''
for word in coded.split():
while len(__SCREAMING_SNAKE_CASE ) != 0:
decoded += decode_dict[word[:5]]
lowercase_ : Any = word[5:]
decoded += " "
return decoded.strip()
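# A minimal round-trip sketch derived from the table above (doctest-style):
# >>> encode("hello")
# 'AABBBAABAAABABAABABAABBAB'
# >>> decode("AABBBAABAAABABAABABAABBAB")
# 'hello'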
if __name__ == "__main__":
from doctest import testmod
testmod()
| 321
| 0
|
"""simple docstring"""
import numpy as np
def lowercase__( __SCREAMING_SNAKE_CASE : np.array ):
return 1 / (1 + np.exp(-vector ))
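# A quick numeric spot-check (approximate values, not part of the original
# doctests): sigmoid(np.array([-1.0, 0.0, 1.0])) is roughly
# array([0.26894142, 0.5, 0.73105858]); note sigmoid(-x) == 1 - sigmoid(x).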
if __name__ == "__main__":
import doctest
doctest.testmod()
| 357
|
"""simple docstring"""
def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int ):
def count_of_possible_combinations(__SCREAMING_SNAKE_CASE : int ) -> int:
if target < 0:
return 0
if target == 0:
return 1
return sum(count_of_possible_combinations(target - item ) for item in array )
return count_of_possible_combinations(__SCREAMING_SNAKE_CASE )
def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int ):
def count_of_possible_combinations_with_dp_array(
__SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] ) -> int:
if target < 0:
return 0
if target == 0:
return 1
if dp_array[target] != -1:
return dp_array[target]
lowercase_ : str = sum(
count_of_possible_combinations_with_dp_array(target - item , __SCREAMING_SNAKE_CASE )
for item in array )
lowercase_ : Tuple = answer
return answer
lowercase_ : Optional[Any] = [-1] * (target + 1)
return count_of_possible_combinations_with_dp_array(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int ):
lowercase_ : Dict = [0] * (target + 1)
lowercase_ : Dict = 1
for i in range(1 , target + 1 ):
for j in range(__SCREAMING_SNAKE_CASE ):
if i - array[j] >= 0:
dp_array[i] += dp_array[i - array[j]]
return dp_array[target]
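# Worked example for the bottom-up version above: with array=[1, 2, 5] and
# target=5, the table fills as dp = [1, 1, 2, 3, 5, 9], since each dp[i]
# accumulates dp[i - item] over the items that fit (ordered sequences are
# counted separately). The driver below therefore prints 9.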
if __name__ == "__main__":
import doctest
doctest.testmod()
__SCREAMING_SNAKE_CASE =3
__SCREAMING_SNAKE_CASE =5
__SCREAMING_SNAKE_CASE =[1, 2, 5]
print(combination_sum_iv(n, array, target))
| 321
| 0
|
"""simple docstring"""
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--txt2img_unclip",
default="kakaobrain/karlo-v1-alpha",
type=str,
required=False,
help="The pretrained txt2img unclip.",
)
__SCREAMING_SNAKE_CASE =parser.parse_args()
    __SCREAMING_SNAKE_CASE =UnCLIPPipeline.from_pretrained(args.txt2img_unclip)
__SCREAMING_SNAKE_CASE =CLIPImageProcessor()
__SCREAMING_SNAKE_CASE =CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")
__SCREAMING_SNAKE_CASE =UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
| 358
|
"""simple docstring"""
class UpperCamelCase :
def __init__( self ,__UpperCamelCase ) -> None:
'''simple docstring'''
lowercase_ : int = set_counts
lowercase_ : List[Any] = max(__UpperCamelCase )
lowercase_ : Union[str, Any] = len(__UpperCamelCase )
lowercase_ : Dict = [1] * num_sets
lowercase_ : Optional[int] = list(range(__UpperCamelCase ) )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> bool:
'''simple docstring'''
lowercase_ : Optional[int] = self.get_parent(__UpperCamelCase )
lowercase_ : int = self.get_parent(__UpperCamelCase )
if src_parent == dst_parent:
return False
if self.ranks[dst_parent] >= self.ranks[src_parent]:
self.set_counts[dst_parent] += self.set_counts[src_parent]
lowercase_ : Tuple = 0
lowercase_ : str = dst_parent
if self.ranks[dst_parent] == self.ranks[src_parent]:
self.ranks[dst_parent] += 1
lowercase_ : Union[str, Any] = self.set_counts[dst_parent]
else:
self.set_counts[src_parent] += self.set_counts[dst_parent]
lowercase_ : str = 0
lowercase_ : Tuple = src_parent
lowercase_ : int = self.set_counts[src_parent]
lowercase_ : str = max(self.max_set ,__UpperCamelCase )
return True
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> int:
'''simple docstring'''
if self.parents[disj_set] == disj_set:
return disj_set
lowercase_ : Union[str, Any] = self.get_parent(self.parents[disj_set] )
return self.parents[disj_set]
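    # A brief note on the class above (describing intent, since the method names
    # are generic): it is a disjoint-set union-by-rank structure in which
    # set_counts holds the size of each root's set, max_set tracks the largest
    # set seen so far, and the parent lookup walks up to the representative root.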
| 321
| 0
|
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
__SCREAMING_SNAKE_CASE =(
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
__SCREAMING_SNAKE_CASE =[ord(letter) for letter in string.ascii_lowercase]
__SCREAMING_SNAKE_CASE ={ord(char) for char in VALID_CHARS}
__SCREAMING_SNAKE_CASE =["the", "be", "to", "of", "and", "in", "that", "have"]
def lowercase__( __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : tuple[int, ...] ) -> str | None:
lowercase_ : Optional[Any] = ''
lowercase_ : Tuple = 42
lowercase_ : Tuple = 42
lowercase_ : Optional[Any] = 42
for keychar, cipherchar in zip(cycle(_UpperCamelCase ) , _UpperCamelCase ):
lowercase_ : Dict = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(_UpperCamelCase )
return decoded
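# Why the brute force above is sound: XOR is self-inverse, so
# (cipherchar ^ keychar) ^ keychar == cipherchar for any byte. A three-letter
# lowercase key gives only 26**3 = 17576 candidates, and rejecting any output
# containing characters outside VALID_CHARS prunes nearly all of them.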
def lowercase__( __SCREAMING_SNAKE_CASE : list[int] ) -> list[str]:
lowercase_ : str = []
for key in product(_UpperCamelCase , repeat=3 ):
lowercase_ : Optional[Any] = try_key(_UpperCamelCase , _UpperCamelCase )
if encoded is not None:
possibles.append(_UpperCamelCase )
return possibles
def lowercase__( __SCREAMING_SNAKE_CASE : list[str] , __SCREAMING_SNAKE_CASE : str ) -> list[str]:
return [possible for possible in possibles if common_word in possible.lower()]
def lowercase__( __SCREAMING_SNAKE_CASE : str = "p059_cipher.txt" ) -> int:
lowercase_ : int = 42
lowercase_ : int = 42
lowercase_ : Dict = 42
lowercase_ : Union[str, Any] = 42
lowercase_ : str = Path(_UpperCamelCase ).parent.joinpath(_UpperCamelCase ).read_text(encoding='utf-8' )
lowercase_ : Union[str, Any] = [int(_UpperCamelCase ) for number in data.strip().split(',' )]
lowercase_ : str = filter_valid_chars(_UpperCamelCase )
for common_word in COMMON_WORDS:
lowercase_ : List[Any] = filter_common_word(_UpperCamelCase , _UpperCamelCase )
if len(_UpperCamelCase ) == 1:
break
lowercase_ : Union[str, Any] = possibles[0]
return sum(ord(_UpperCamelCase ) for char in decoded_text )
if __name__ == "__main__":
print(F"{solution() = }")
| 359
|
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE ={
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
__SCREAMING_SNAKE_CASE ={
"vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
"merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
"tokenizer_config_file": {
"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
},
}
__SCREAMING_SNAKE_CASE ={"facebook/blenderbot-3B": 128}
class UpperCamelCase ( lowercase_ ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = ['input_ids', 'attention_mask']
lowercase = BlenderbotTokenizer
def __init__( self ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase="replace" ,__UpperCamelCase="<s>" ,__UpperCamelCase="</s>" ,__UpperCamelCase="</s>" ,__UpperCamelCase="<s>" ,__UpperCamelCase="<unk>" ,__UpperCamelCase="<pad>" ,__UpperCamelCase="<mask>" ,__UpperCamelCase=False ,__UpperCamelCase=True ,**__UpperCamelCase ,) -> Optional[int]:
'''simple docstring'''
super().__init__(
__UpperCamelCase ,__UpperCamelCase ,tokenizer_file=__UpperCamelCase ,errors=__UpperCamelCase ,bos_token=__UpperCamelCase ,eos_token=__UpperCamelCase ,sep_token=__UpperCamelCase ,cls_token=__UpperCamelCase ,unk_token=__UpperCamelCase ,pad_token=__UpperCamelCase ,mask_token=__UpperCamelCase ,add_prefix_space=__UpperCamelCase ,trim_offsets=__UpperCamelCase ,**__UpperCamelCase ,)
lowercase_ : Optional[int] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' ,__UpperCamelCase ) != add_prefix_space:
lowercase_ : Union[str, Any] = getattr(__UpperCamelCase ,pre_tok_state.pop('type' ) )
lowercase_ : Any = add_prefix_space
lowercase_ : Tuple = pre_tok_class(**__UpperCamelCase )
lowercase_ : int = add_prefix_space
lowercase_ : Any = 'post_processor'
lowercase_ : Optional[Any] = getattr(self.backend_tokenizer ,__UpperCamelCase ,__UpperCamelCase )
if tokenizer_component_instance:
lowercase_ : Tuple = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
lowercase_ : str = tuple(state['sep'] )
if "cls" in state:
lowercase_ : Union[str, Any] = tuple(state['cls'] )
lowercase_ : str = False
if state.get('add_prefix_space' ,__UpperCamelCase ) != add_prefix_space:
lowercase_ : Dict = add_prefix_space
lowercase_ : int = True
if state.get('trim_offsets' ,__UpperCamelCase ) != trim_offsets:
lowercase_ : Optional[Any] = trim_offsets
lowercase_ : Tuple = True
if changes_to_apply:
lowercase_ : Union[str, Any] = getattr(__UpperCamelCase ,state.pop('type' ) )
lowercase_ : Union[str, Any] = component_class(**__UpperCamelCase )
setattr(self.backend_tokenizer ,__UpperCamelCase ,__UpperCamelCase )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Tuple:
'''simple docstring'''
lowercase_ : Any = AddedToken(__UpperCamelCase ,lstrip=__UpperCamelCase ,rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase ,__UpperCamelCase ) else value
lowercase_ : str = value
def _UpperCAmelCase ( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> BatchEncoding:
'''simple docstring'''
lowercase_ : Optional[int] = kwargs.get('is_split_into_words' ,__UpperCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__UpperCamelCase ,**__UpperCamelCase )
def _UpperCAmelCase ( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> BatchEncoding:
'''simple docstring'''
lowercase_ : List[str] = kwargs.get('is_split_into_words' ,__UpperCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__UpperCamelCase ,**__UpperCamelCase )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> Tuple[str]:
'''simple docstring'''
lowercase_ : Any = self._tokenizer.model.save(__UpperCamelCase ,name=__UpperCamelCase )
return tuple(__UpperCamelCase )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> List[int]:
'''simple docstring'''
lowercase_ : int = [self.sep_token_id]
lowercase_ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> Any:
'''simple docstring'''
return token_ids_a + [self.eos_token_id]
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[int]:
'''simple docstring'''
lowercase_ : Optional[Any] = []
for is_user, text in conversation.iter_texts():
if is_user:
                # We need to prefix a space, as is done within Blenderbot
inputs.append(' ' + text )
else:
# Generated responses should contain them already.
inputs.append(__UpperCamelCase )
lowercase_ : Dict = ' '.join(__UpperCamelCase )
lowercase_ : str = self.encode(__UpperCamelCase )
if len(__UpperCamelCase ) > self.model_max_length:
lowercase_ : List[str] = input_ids[-self.model_max_length :]
logger.warning(f'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
return input_ids
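    # Note on the conversation builder above: user turns are given a leading
    # space to mirror how Blenderbot was trained, and when the encoded dialogue
    # exceeds model_max_length it is truncated from the left so the most recent
    # tokens are kept.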
| 321
| 0
|
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] ):
lowercase_ : Optional[Any] = SwinConfig(image_size=1_92 )
if "base" in model_name:
lowercase_ : Tuple = 6
lowercase_ : List[str] = 1_28
lowercase_ : Tuple = (2, 2, 18, 2)
lowercase_ : int = (4, 8, 16, 32)
elif "large" in model_name:
lowercase_ : Optional[int] = 12
lowercase_ : Dict = 1_92
lowercase_ : Optional[Any] = (2, 2, 18, 2)
lowercase_ : Dict = (6, 12, 24, 48)
else:
raise ValueError('Model not supported, only supports base and large variants' )
lowercase_ : Any = window_size
lowercase_ : str = embed_dim
lowercase_ : Optional[int] = depths
lowercase_ : str = num_heads
return config
def lowercase__( __SCREAMING_SNAKE_CASE : int ):
if "encoder.mask_token" in name:
lowercase_ : Dict = name.replace('encoder.mask_token' , 'embeddings.mask_token' )
if "encoder.patch_embed.proj" in name:
lowercase_ : List[Any] = name.replace('encoder.patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "encoder.patch_embed.norm" in name:
lowercase_ : List[Any] = name.replace('encoder.patch_embed.norm' , 'embeddings.norm' )
if "attn.proj" in name:
lowercase_ : Any = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
lowercase_ : str = name.replace('attn' , 'attention.self' )
if "norm1" in name:
lowercase_ : Union[str, Any] = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
lowercase_ : int = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
lowercase_ : Tuple = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
lowercase_ : int = name.replace('mlp.fc2' , 'output.dense' )
if name == "encoder.norm.weight":
lowercase_ : Dict = 'layernorm.weight'
if name == "encoder.norm.bias":
lowercase_ : Tuple = 'layernorm.bias'
if "decoder" in name:
pass
else:
lowercase_ : List[str] = 'swin.' + name
return name
def lowercase__( __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Tuple ):
for key in orig_state_dict.copy().keys():
lowercase_ : Any = orig_state_dict.pop(lowerCAmelCase__ )
if "attn_mask" in key:
pass
elif "qkv" in key:
lowercase_ : str = key.split('.' )
lowercase_ : List[Any] = int(key_split[2] )
lowercase_ : Tuple = int(key_split[4] )
lowercase_ : List[Any] = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
lowercase_ : str = val[:dim, :]
lowercase_ : int = val[
dim : dim * 2, :
]
lowercase_ : Any = val[-dim:, :]
else:
lowercase_ : Tuple = val[
:dim
]
lowercase_ : Optional[int] = val[
dim : dim * 2
]
lowercase_ : Optional[Any] = val[
-dim:
]
else:
lowercase_ : Any = val
return orig_state_dict
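# Note on the qkv handling above: the original checkpoint stores query, key and
# value fused in a single qkv tensor, so the three dim-sized slices along the
# first axis are routed back into the separate query/key/value weights and
# biases that the HF Swin attention layers expect.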
def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : List[Any] ):
lowercase_ : List[Any] = torch.load(lowerCAmelCase__ , map_location='cpu' )['model']
lowercase_ : Optional[Any] = get_swin_config(lowerCAmelCase__ )
lowercase_ : int = SwinForMaskedImageModeling(lowerCAmelCase__ )
model.eval()
lowercase_ : Dict = convert_state_dict(lowerCAmelCase__ , lowerCAmelCase__ )
model.load_state_dict(lowerCAmelCase__ )
lowercase_ : Dict = 'http://images.cocodataset.org/val2017/000000039769.jpg'
lowercase_ : List[str] = ViTImageProcessor(size={'height': 1_92, 'width': 1_92} )
lowercase_ : Union[str, Any] = Image.open(requests.get(lowerCAmelCase__ , stream=lowerCAmelCase__ ).raw )
lowercase_ : List[str] = image_processor(images=lowerCAmelCase__ , return_tensors='pt' )
with torch.no_grad():
lowercase_ : str = model(**lowerCAmelCase__ ).logits
print(outputs.keys() )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowerCAmelCase__ )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(lowerCAmelCase__ )
if push_to_hub:
print(F'''Pushing model and image processor for {model_name} to hub''' )
model.push_to_hub(F'''microsoft/{model_name}''' )
image_processor.push_to_hub(F'''microsoft/{model_name}''' )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="swin-base-simmim-window6-192",
type=str,
choices=["swin-base-simmim-window6-192", "swin-large-simmim-window12-192"],
help="Name of the Swin SimMIM model you'd like to convert.",
)
parser.add_argument(
"--checkpoint_path",
default="/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth",
type=str,
help="Path to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
__SCREAMING_SNAKE_CASE =parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 360
|
"""simple docstring"""
import os
import sys
import unittest
__SCREAMING_SNAKE_CASE =os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
__SCREAMING_SNAKE_CASE =os.path.join("tests", "models", "bert", "test_modeling_bert.py")
__SCREAMING_SNAKE_CASE =os.path.join("tests", "models", "blip", "test_modeling_blip.py")
class UpperCamelCase ( unittest.TestCase ):
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
lowercase_ : Tuple = get_test_to_tester_mapping(__UpperCamelCase )
lowercase_ : Optional[int] = get_test_to_tester_mapping(__UpperCamelCase )
lowercase_ : List[str] = {'BertModelTest': 'BertModelTester'}
lowercase_ : Union[str, Any] = {
'BlipModelTest': 'BlipModelTester',
'BlipTextImageModelTest': 'BlipTextImageModelsModelTester',
'BlipTextModelTest': 'BlipTextModelTester',
'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester',
'BlipVQAModelTest': 'BlipVQAModelTester',
'BlipVisionModelTest': 'BlipVisionModelTester',
}
self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase )
self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase )
def _UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ : Optional[Any] = get_model_to_test_mapping(__UpperCamelCase )
lowercase_ : List[str] = get_model_to_test_mapping(__UpperCamelCase )
lowercase_ : Any = {
'BertForMaskedLM': ['BertModelTest'],
'BertForMultipleChoice': ['BertModelTest'],
'BertForNextSentencePrediction': ['BertModelTest'],
'BertForPreTraining': ['BertModelTest'],
'BertForQuestionAnswering': ['BertModelTest'],
'BertForSequenceClassification': ['BertModelTest'],
'BertForTokenClassification': ['BertModelTest'],
'BertLMHeadModel': ['BertModelTest'],
'BertModel': ['BertModelTest'],
}
lowercase_ : Any = {
'BlipForConditionalGeneration': ['BlipTextImageModelTest'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'],
'BlipForQuestionAnswering': ['BlipVQAModelTest'],
'BlipModel': ['BlipModelTest'],
'BlipTextModel': ['BlipTextModelTest'],
'BlipVisionModel': ['BlipVisionModelTest'],
}
self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase )
self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase )
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
lowercase_ : List[str] = get_model_to_tester_mapping(__UpperCamelCase )
lowercase_ : Dict = get_model_to_tester_mapping(__UpperCamelCase )
lowercase_ : Tuple = {
'BertForMaskedLM': ['BertModelTester'],
'BertForMultipleChoice': ['BertModelTester'],
'BertForNextSentencePrediction': ['BertModelTester'],
'BertForPreTraining': ['BertModelTester'],
'BertForQuestionAnswering': ['BertModelTester'],
'BertForSequenceClassification': ['BertModelTester'],
'BertForTokenClassification': ['BertModelTester'],
'BertLMHeadModel': ['BertModelTester'],
'BertModel': ['BertModelTester'],
}
lowercase_ : Optional[Any] = {
'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'],
'BlipForQuestionAnswering': ['BlipVQAModelTester'],
'BlipModel': ['BlipModelTester'],
'BlipTextModel': ['BlipTextModelTester'],
'BlipVisionModel': ['BlipVisionModelTester'],
}
self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase )
self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase )
| 321
| 0
|
"""simple docstring"""
def lowercase__( __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Dict ):
# "extended trapezoidal rule"
# int(f) = dx/2 * (f1 + 2f2 + ... + fn)
lowercase_ : Union[str, Any] = (boundary[1] - boundary[0]) / steps
lowercase_ : Any = boundary[0]
lowercase_ : Optional[Any] = boundary[1]
lowercase_ : Tuple = make_points(a__ , a__ , a__ )
lowercase_ : Union[str, Any] = 0.0
y += (h / 2.0) * f(a__ )
for i in x_i:
# print(i)
y += h * f(a__ )
y += (h / 2.0) * f(a__ )
return y
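# A hedged sanity check for the trapezoidal routine above: with f(x) = x**2 on
# [0, 1] and 10 steps, the composite rule evaluates roughly
# h * (f(0)/2 + f(0.1) + ... + f(0.9) + f(1)/2) = 0.335, slightly above the
# exact integral 1/3, as expected for a convex integrand.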
def lowercase__( __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Optional[int] ):
lowercase_ : List[str] = a + h
while x < (b - h):
yield x
lowercase_ : List[str] = x + h
def lowercase__( __SCREAMING_SNAKE_CASE : List[str] ): # enter your function here
lowercase_ : List[str] = (x - 0) * (x - 0)
return y
def lowercase__( ):
lowercase_ : Dict = 0.0 # Lower bound of integration
lowercase_ : Dict = 1.0 # Upper bound of integration
lowercase_ : int = 10.0 # define number of steps or resolution
lowercase_ : Any = [a, b] # define boundary of integration
lowercase_ : str = method_a(a__ , a__ )
print(F'''y = {y}''' )
if __name__ == "__main__":
main()
| 361
|
"""simple docstring"""
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def lowercase__( *__SCREAMING_SNAKE_CASE : Tuple ):
with open(__SCREAMING_SNAKE_CASE , 'r' ) as fh:
fcntl.flock(__SCREAMING_SNAKE_CASE , fcntl.LOCK_EX )
try:
print(*__SCREAMING_SNAKE_CASE )
finally:
fcntl.flock(__SCREAMING_SNAKE_CASE , fcntl.LOCK_UN )
__SCREAMING_SNAKE_CASE =int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
__SCREAMING_SNAKE_CASE =torch.device("cuda", local_rank)
__SCREAMING_SNAKE_CASE =socket.gethostname()
__SCREAMING_SNAKE_CASE =F"[{hostname}-{local_rank}]"
try:
# test distributed
dist.init_process_group("nccl")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
__SCREAMING_SNAKE_CASE =dist.get_rank()
__SCREAMING_SNAKE_CASE =dist.get_world_size()
printflock(F"{gpu} is OK (global rank: {rank}/{world_size})")
dist.barrier()
if rank == 0:
printflock(F"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}")
except Exception:
printflock(F"{gpu} is broken")
raise
| 321
| 0
|
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
__SCREAMING_SNAKE_CASE ="\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
__SCREAMING_SNAKE_CASE ="\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
__SCREAMING_SNAKE_CASE ="\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n \'google_bleu\': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... 
\'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase ( datasets.Metric ):
def _UpperCAmelCase ( self ) -> MetricInfo:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' ,id='token' ) ,id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' ,id='token' ) ,id='sequence' ) ,id='references' ),
} ) ,)
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase = 1 ,__UpperCamelCase = 4 ,) -> Dict[str, float]:
'''simple docstring'''
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=__UpperCamelCase ,hypotheses=__UpperCamelCase ,min_len=__UpperCamelCase ,max_len=__UpperCamelCase )
}
| 362
|
"""simple docstring"""
class UpperCamelCase :
def __init__( self ,__UpperCamelCase ,__UpperCamelCase ) -> int:
'''simple docstring'''
lowercase_ : List[Any] = name
lowercase_ : int = val
def __str__( self ) -> Tuple:
'''simple docstring'''
return f'''{self.__class__.__name__}({self.name}, {self.val})'''
def __lt__( self ,__UpperCamelCase ) -> Tuple:
'''simple docstring'''
return self.val < other.val
class UpperCamelCase :
def __init__( self ,__UpperCamelCase ) -> Dict:
'''simple docstring'''
lowercase_ : Optional[int] = {}
lowercase_ : Tuple = {}
lowercase_ : Union[str, Any] = self.build_heap(__UpperCamelCase )
def __getitem__( self ,__UpperCamelCase ) -> int:
'''simple docstring'''
return self.get_value(__UpperCamelCase )
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]:
'''simple docstring'''
return (idx - 1) // 2
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[Any]:
'''simple docstring'''
return idx * 2 + 1
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Tuple:
'''simple docstring'''
return idx * 2 + 2
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[Any]:
'''simple docstring'''
return self.heap_dict[key]
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]:
'''simple docstring'''
lowercase_ : Optional[int] = len(__UpperCamelCase ) - 1
lowercase_ : Optional[int] = self.get_parent_idx(__UpperCamelCase )
for idx, i in enumerate(__UpperCamelCase ):
lowercase_ : Any = idx
lowercase_ : str = i.val
for i in range(__UpperCamelCase ,-1 ,-1 ):
self.sift_down(__UpperCamelCase ,__UpperCamelCase )
return array
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> Tuple:
'''simple docstring'''
while True:
lowercase_ : List[str] = self.get_left_child_idx(__UpperCamelCase ) # noqa: E741
lowercase_ : List[str] = self.get_right_child_idx(__UpperCamelCase )
lowercase_ : List[str] = idx
if l < len(__UpperCamelCase ) and array[l] < array[idx]:
lowercase_ : List[str] = l
if r < len(__UpperCamelCase ) and array[r] < array[smallest]:
lowercase_ : Dict = r
if smallest != idx:
lowercase_ , lowercase_ : Union[str, Any] = array[smallest], array[idx]
(
(
lowercase_
) , (
lowercase_
) ,
) : str = (
self.idx_of_element[array[smallest]],
self.idx_of_element[array[idx]],
)
lowercase_ : Any = smallest
else:
break
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> int:
'''simple docstring'''
lowercase_ : Dict = self.get_parent_idx(__UpperCamelCase )
while p >= 0 and self.heap[p] > self.heap[idx]:
lowercase_ , lowercase_ : Any = self.heap[idx], self.heap[p]
lowercase_ , lowercase_ : Tuple = (
self.idx_of_element[self.heap[idx]],
self.idx_of_element[self.heap[p]],
)
lowercase_ : int = p
lowercase_ : str = self.get_parent_idx(__UpperCamelCase )
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
return self.heap[0]
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ , lowercase_ : Optional[Any] = self.heap[-1], self.heap[0]
lowercase_ , lowercase_ : Tuple = (
self.idx_of_element[self.heap[-1]],
self.idx_of_element[self.heap[0]],
)
lowercase_ : Tuple = self.heap.pop()
del self.idx_of_element[x]
self.sift_down(0 ,self.heap )
return x
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Dict:
'''simple docstring'''
self.heap.append(__UpperCamelCase )
lowercase_ : Tuple = len(self.heap ) - 1
lowercase_ : Optional[int] = node.val
self.sift_up(len(self.heap ) - 1 )
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
return len(self.heap ) == 0
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> List[Any]:
'''simple docstring'''
assert (
self.heap[self.idx_of_element[node]].val > new_value
), "newValue must be less that current value"
lowercase_ : Any = new_value
lowercase_ : List[str] = new_value
self.sift_up(self.idx_of_element[node] )
__SCREAMING_SNAKE_CASE =Node("R", -1)
__SCREAMING_SNAKE_CASE =Node("B", 6)
__SCREAMING_SNAKE_CASE =Node("A", 3)
__SCREAMING_SNAKE_CASE =Node("X", 1)
__SCREAMING_SNAKE_CASE =Node("E", 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
__SCREAMING_SNAKE_CASE =MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print("Min Heap - before decrease key")
for i in my_min_heap.heap:
print(i)
print("Min Heap - After decrease key of node [B -> -17]")
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
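
# Expected behaviour of the driver above (a quick sanity sketch):
# my_min_heap.peek()   -> Node(B, -17) once the decrease_key call has run
# my_min_heap.remove() -> pops Node(B, -17); the new minimum is Node(R, -1)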
if __name__ == "__main__":
import doctest
doctest.testmod()
| 321
| 0
|
"""simple docstring"""
import string
from math import logaa
def lowercase__( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str ):
lowercase_ : Dict = document.translate(
str.maketrans('' , '' , string.punctuation ) ).replace('\n' , '' )
lowercase_ : List[str] = document_without_punctuation.split(' ' ) # word tokenization
return len([word for word in tokenize_document if word.lower() == term.lower()] )
def lowercase__( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str ):
lowercase_ : Dict = corpus.lower().translate(
str.maketrans('' , '' , string.punctuation ) ) # strip all punctuation and replace it with ''
lowercase_ : Union[str, Any] = corpus_without_punctuation.split('\n' )
lowercase_ : List[Any] = term.lower()
return (len([doc for doc in docs if term in doc] ), len(UpperCamelCase__ ))
def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : List[str]=False ):
if smoothing:
if n == 0:
raise ValueError('log10(0) is undefined.' )
return round(1 + logaa(n / (1 + df) ) , 3 )
if df == 0:
raise ZeroDivisionError('df must be > 0' )
elif n == 0:
raise ValueError('log10(0) is undefined.' )
return round(logaa(n / df ) , 3 )
def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int ):
return round(tf * idf , 3 )
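

if __name__ == "__main__":
    # Minimal usage sketch (the corpus and term are illustrative):
    corpus = "the cat sat\nthe dog ran\nthe cat ran"
    tf = term_frequency("cat", "the cat sat")  # -> 1
    df, n = document_frequency("cat", corpus)  # -> (2, 3)
    idf = inverse_document_frequency(df, n)  # -> round(log10(3 / 2), 3) = 0.176
    print(tf_idf(tf, idf))  # -> 0.176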
| 363
|
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class CLIPSegProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Create a list of PIL images for testing."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPSegProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPSegProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPSegProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPSegProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_visual_prompt(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        visual_prompt_input = self.prepare_image_inputs()

        inputs = processor(images=image_input, visual_prompt=visual_prompt_input)
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "conditional_pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
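
# End-to-end sketch of the processor under test (the checkpoint name is the
# public CLIPSeg release and is illustrative here):
# processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
# inputs = processor(text=["a cat"], images=image, return_tensors="pt")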
| 321
| 0
|
"""simple docstring"""
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))


class LevitModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 6, 8],
        depths=[2, 3, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return LevitConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            kernel_size=self.kernel_size,
            stride=self.stride,
            padding=self.padding,
            patch_size=self.patch_size,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            depths=self.depths,
            key_dim=self.key_dim,
            drop_path_rate=self.drop_path_rate,
            mlp_ratio=self.mlp_ratio,
            attention_ratio=self.attention_ratio,
            initializer_range=self.initializer_range,
            down_ops=self.down_ops,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = LevitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for _ in range(4):
            height = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1)
            width = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, ceil(height / 4) * ceil(width / 4), self.hidden_sizes[-1]),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = LevitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class LevitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LevitModel,
            "image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = LevitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LevitConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Levit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Levit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="Levit does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = len(self.model_tester.depths) + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            image_size = (self.model_tester.image_size, self.model_tester.image_size)
            height, width = image_size[0], image_size[1]
            for _ in range(4):
                height = floor(
                    (
                        (height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1
                )
                width = floor(
                    (
                        (width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1
                )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [height * width, self.model_tester.hidden_sizes[0]],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # LevitForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # LevitForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]

                    model = model_class(config)
                    model.to(torch_device)
                    model.train()

                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)

                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])

                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])

                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom that something is wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}"
                            )

                    loss.backward()

    @slow
    def test_model_from_pretrained(self):
        for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LevitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class LevitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([1.0448, -0.3745, -1.8317]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
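
# Hypothetical quick-start mirroring the integration test above ("facebook/levit-128S"
# is one public LeViT checkpoint; any other would work the same way):
# processor = LevitImageProcessor.from_pretrained("facebook/levit-128S")
# model = LevitForImageClassification.from_pretrained("facebook/levit-128S")
# logits = model(**processor(images=prepare_img(), return_tensors="pt")).logits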
| 364
|
"""simple docstring"""
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 321
| 0
|
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE ={
"facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json",
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class Wav2Vec2Config(PretrainedConfig):
    model_type = "wav2vec2"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        adapter_attn_dim=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        self.adapter_attn_dim = adapter_attn_dim

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
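
# Quick sanity sketch (with the defaults above): the feature encoder downsamples
# raw audio by the product of conv_stride, which the property exposes.
# Wav2Vec2Config().inputs_to_logits_ratio  -> 5 * 2 * 2 * 2 * 2 * 2 * 2 = 320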
| 365
|
"""simple docstring"""
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class BertGenerationEncoderTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=50,
        initializer_range=0.02,
        use_labels=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.use_labels = use_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return BertGenerationConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            token_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(self, config, input_ids, input_mask, token_labels, **kwargs):
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        token_labels,
        encoder_hidden_states,
        encoder_attention_mask,
        **kwargs,
    ):
        config.add_cross_attention = True
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        input_mask,
        token_labels,
        encoder_hidden_states,
        encoder_attention_mask,
        **kwargs,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = BertGenerationDecoder(config=config).to(torch_device).eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels, *args):
        model = BertGenerationDecoder(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class BertGenerationEncoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    all_generative_model_classes = (BertGenerationDecoder,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
        if is_torch_available()
        else {}
    )

    def setUp(self):
        self.model_tester = BertGenerationEncoderTester(self)
        self.config_tester = ConfigTester(self, config_class=BertGenerationConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_bert(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = "bert"
        self.model_tester.create_and_check_model(config, input_ids, input_mask, token_labels)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_model_as_decoder_with_default_input_mask(self):
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        self.assertIsNotNone(model)
@require_torch
class BertGenerationEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 1024])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))


@require_torch
class BertGenerationDecoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 50358])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 321
| 0
|
"""simple docstring"""
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    vae_state_dict = checkpoint

    new_checkpoint = {}

    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]

    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]

    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]

    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
    }

    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
    }

    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]

        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight"
            )
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias"
            )

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)

    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]

        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint


def vae_pt_to_vae_diffuser(
    checkpoint_path: str,
    output_path: str,
):
    # Only support V1
    r = requests.get(
        " https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
    )
    io_obj = io.BytesIO(r.content)

    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
__SCREAMING_SNAKE_CASE =parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
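
# Example invocation (paths are illustrative):
#   python convert_vae_pt_to_diffusers.py --vae_pt_path ./vae.pt --dump_path ./vae_diffusers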
| 366
|
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    def forward(self, input_ids, token_type_ids, attention_mask):
        return None


class FuncNonContiguousArgs:
    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None


class OnnxExportTestCase(unittest.TestCase):
    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ("bert-base-cased", {}),
        ("gpt2", {"use_cache": False}),  # We don't support exporting GPT2 past keys anymore
    ]
@require_tf
@slow
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(__UpperCamelCase ,'tf' ,12 ,**__UpperCamelCase )
@require_torch
@slow
def _UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(__UpperCamelCase ,'pt' ,12 ,**__UpperCamelCase )
@require_torch
@slow
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
from transformers import BertModel
lowercase_ : Union[str, Any] = ['[UNK]', '[SEP]', '[CLS]', '[PAD]', '[MASK]', 'some', 'other', 'words']
with NamedTemporaryFile(mode='w+t' ) as vocab_file:
vocab_file.write('\n'.join(__UpperCamelCase ) )
vocab_file.flush()
lowercase_ : List[str] = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
lowercase_ : Optional[Any] = BertModel(BertConfig(vocab_size=len(__UpperCamelCase ) ) )
model.save_pretrained(__UpperCamelCase )
self._test_export(__UpperCamelCase ,'pt' ,12 ,__UpperCamelCase )
@require_tf
@slow
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowercase_ : Optional[int] = self._test_export(__UpperCamelCase ,'tf' ,12 ,**__UpperCamelCase )
lowercase_ : int = quantize(Path(__UpperCamelCase ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(__UpperCamelCase ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
@require_torch
@slow
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowercase_ : Tuple = self._test_export(__UpperCamelCase ,'pt' ,12 ,**__UpperCamelCase )
lowercase_ : Tuple = quantize(__UpperCamelCase )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(__UpperCamelCase ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=None ,**__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
try:
# Compute path
with TemporaryDirectory() as tempdir:
lowercase_ : Dict = Path(__UpperCamelCase ).joinpath('model.onnx' )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase )
return path
except Exception as e:
self.fail(__UpperCamelCase )
@require_torch
@require_tokenizers
@slow
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
from transformers import BertModel
lowercase_ : List[Any] = BertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
lowercase_ : Union[str, Any] = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
self._test_infer_dynamic_axis(__UpperCamelCase ,__UpperCamelCase ,'pt' )
@require_tf
@require_tokenizers
@slow
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
from transformers import TFBertModel
lowercase_ : Optional[Any] = TFBertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
lowercase_ : Any = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
self._test_infer_dynamic_axis(__UpperCamelCase ,__UpperCamelCase ,'tf' )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Dict:
'''simple docstring'''
lowercase_ : Tuple = FeatureExtractionPipeline(__UpperCamelCase ,__UpperCamelCase )
lowercase_ : Dict = ['input_ids', 'token_type_ids', 'attention_mask', 'output_0', 'output_1']
lowercase_ , lowercase_ , lowercase_ , lowercase_ : Any = infer_shapes(__UpperCamelCase ,__UpperCamelCase )
# Assert all variables are present
self.assertEqual(len(__UpperCamelCase ) ,len(__UpperCamelCase ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3] ,__UpperCamelCase )
self.assertSequenceEqual(variable_names[3:] ,__UpperCamelCase )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] ,{0: 'batch', 1: 'sequence'} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes['output_0'] ,{0: 'batch', 1: 'sequence'} )
self.assertDictEqual(shapes['output_1'] ,{0: 'batch'} )
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
lowercase_ : Any = ['input_ids', 'attention_mask', 'token_type_ids']
lowercase_ : List[Any] = {'input_ids': [1, 2, 3, 4], 'attention_mask': [0, 0, 0, 0], 'token_type_ids': [1, 1, 1, 1]}
lowercase_ , lowercase_ : int = ensure_valid_input(FuncContiguousArgs() ,__UpperCamelCase ,__UpperCamelCase )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(__UpperCamelCase ) ,3 )
# Should have exactly the same input names
self.assertEqual(set(__UpperCamelCase ) ,set(__UpperCamelCase ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(__UpperCamelCase ,(tokens['input_ids'], tokens['token_type_ids'], tokens['attention_mask']) )
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
lowercase_ , lowercase_ : Optional[int] = ensure_valid_input(FuncNonContiguousArgs() ,__UpperCamelCase ,__UpperCamelCase )
# Should have exactly the one arg (all before the one not provided "some_other_args")
self.assertEqual(len(__UpperCamelCase ) ,1 )
self.assertEqual(len(__UpperCamelCase ) ,1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] ,tokens['input_ids'] )
self.assertEqual(ordered_input_names[0] ,'input_ids' )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : Dict = generate_identified_filename(Path('/home/something/my_fake_model.onnx' ) ,'-test' )
self.assertEqual('/home/something/my_fake_model-test.onnx' ,generated.as_posix() )
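# A minimal sketch of the filename helper the last assertion exercises, assuming
# the convert_graph_to_onnx semantics of splicing an identifier in front of the
# suffix. The *_sketch name marks this as an illustration, not the library source.
from pathlib import Path

def generate_identified_filename_sketch(filename: Path, identifier: str) -> Path:
    # "my_fake_model.onnx" + "-test" -> "my_fake_model-test.onnx"
    return filename.parent.joinpath(filename.stem + identifier).with_suffix(filename.suffix)

assert generate_identified_filename_sketch(
    Path('/home/something/my_fake_model.onnx'), '-test'
).as_posix() == '/home/something/my_fake_model-test.onnx'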
| 321
| 0
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
if is_vision_available():
import PIL
class UpperCamelCase ( UpperCamelCase__ ):
lowercase = ['pixel_values']
def __init__( self ,__UpperCamelCase = True ,__UpperCamelCase = None ,__UpperCamelCase = PILImageResampling.BICUBIC ,__UpperCamelCase = True ,__UpperCamelCase = None ,__UpperCamelCase = True ,__UpperCamelCase = 1 / 255 ,__UpperCamelCase = True ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = True ,**__UpperCamelCase ,) -> Optional[Any]:
'''simple docstring'''
super().__init__(**__lowerCamelCase )
lowercase_ : int = size if size is not None else {'shortest_edge': 224}
lowercase_ : List[str] = get_size_dict(__lowerCamelCase ,default_to_square=__lowerCamelCase )
lowercase_ : Optional[int] = crop_size if crop_size is not None else {'height': 224, 'width': 224}
lowercase_ : int = get_size_dict(__lowerCamelCase ,default_to_square=__lowerCamelCase ,param_name='crop_size' )
lowercase_ : str = do_resize
lowercase_ : Union[str, Any] = size
lowercase_ : List[str] = resample
lowercase_ : Optional[int] = do_center_crop
lowercase_ : List[str] = crop_size
lowercase_ : str = do_rescale
lowercase_ : Union[str, Any] = rescale_factor
lowercase_ : Dict = do_normalize
lowercase_ : str = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
lowercase_ : str = image_std if image_std is not None else OPENAI_CLIP_STD
lowercase_ : List[Any] = do_convert_rgb
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase = PILImageResampling.BICUBIC ,__UpperCamelCase = None ,**__UpperCamelCase ,) -> List[Any]:
'''simple docstring'''
lowercase_ : Optional[Any] = get_size_dict(__lowerCamelCase ,default_to_square=__lowerCamelCase )
if "shortest_edge" not in size:
raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
lowercase_ : Dict = get_resize_output_image_size(__lowerCamelCase ,size=size['shortest_edge'] ,default_to_square=__lowerCamelCase )
return resize(__lowerCamelCase ,size=__lowerCamelCase ,resample=__lowerCamelCase ,data_format=__lowerCamelCase ,**__lowerCamelCase )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase = None ,**__UpperCamelCase ,) -> Dict:
'''simple docstring'''
lowercase_ : Optional[Any] = get_size_dict(__lowerCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(__lowerCamelCase ,size=(size['height'], size['width']) ,data_format=__lowerCamelCase ,**__lowerCamelCase )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase = None ,**__UpperCamelCase ,) -> Any:
'''simple docstring'''
return rescale(__lowerCamelCase ,scale=__lowerCamelCase ,data_format=__lowerCamelCase ,**__lowerCamelCase )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase = None ,**__UpperCamelCase ,) -> str:
'''simple docstring'''
return normalize(__lowerCamelCase ,mean=__lowerCamelCase ,std=__lowerCamelCase ,data_format=__lowerCamelCase ,**__lowerCamelCase )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = ChannelDimension.FIRST ,**__UpperCamelCase ,) -> Optional[int]:
'''simple docstring'''
lowercase_ : Optional[Any] = do_resize if do_resize is not None else self.do_resize
lowercase_ : List[str] = size if size is not None else self.size
lowercase_ : Optional[Any] = get_size_dict(__lowerCamelCase ,param_name='size' ,default_to_square=__lowerCamelCase )
lowercase_ : Tuple = resample if resample is not None else self.resample
lowercase_ : Dict = do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase_ : List[Any] = crop_size if crop_size is not None else self.crop_size
lowercase_ : Dict = get_size_dict(__lowerCamelCase ,param_name='crop_size' ,default_to_square=__lowerCamelCase )
lowercase_ : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
lowercase_ : str = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase_ : str = do_normalize if do_normalize is not None else self.do_normalize
lowercase_ : Any = image_mean if image_mean is not None else self.image_mean
lowercase_ : Any = image_std if image_std is not None else self.image_std
lowercase_ : Any = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
lowercase_ : str = make_list_of_images(__lowerCamelCase )
if not valid_images(__lowerCamelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
lowercase_ : Optional[int] = [convert_to_rgb(__lowerCamelCase ) for image in images]
# All transformations expect numpy arrays.
lowercase_ : List[Any] = [to_numpy_array(__lowerCamelCase ) for image in images]
if do_resize:
lowercase_ : List[Any] = [self.resize(image=__lowerCamelCase ,size=__lowerCamelCase ,resample=__lowerCamelCase ) for image in images]
if do_center_crop:
lowercase_ : int = [self.center_crop(image=__lowerCamelCase ,size=__lowerCamelCase ) for image in images]
if do_rescale:
lowercase_ : Tuple = [self.rescale(image=__lowerCamelCase ,scale=__lowerCamelCase ) for image in images]
if do_normalize:
lowercase_ : Tuple = [self.normalize(image=__lowerCamelCase ,mean=__lowerCamelCase ,std=__lowerCamelCase ) for image in images]
lowercase_ : Union[str, Any] = [to_channel_dimension_format(__lowerCamelCase ,__lowerCamelCase ) for image in images]
lowercase_ : Union[str, Any] = {'pixel_values': images}
return BatchFeature(data=__lowerCamelCase ,tensor_type=__lowerCamelCase )
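# Minimal numpy sketch of the transform order the processor above applies
# (resize -> center crop -> rescale -> normalize -> HWC->CHW). The mean/std are
# the OpenAI CLIP defaults the class refers to; the resize step is elided and
# the central-crop shortcut below is an assumption for brevity.
import numpy as np

_CLIP_MEAN = np.array([0.48145466, 0.4578275, 0.40821073])
_CLIP_STD = np.array([0.26862954, 0.26130258, 0.27577711])

def preprocess_sketch(image: np.ndarray) -> np.ndarray:
    # image: (H, W, 3) uint8 with H, W >= 224; a real pipeline first resizes
    # the shortest edge to 224, then crops.
    h, w, _ = image.shape
    top, left = (h - 224) // 2, (w - 224) // 2
    image = image[top : top + 224, left : left + 224]
    image = image * (1 / 255)                       # rescale
    image = (image - _CLIP_MEAN) / _CLIP_STD        # normalize
    return image.transpose(2, 0, 1)                 # ChannelDimension.FIRST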
| 367
|
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class UpperCamelCase ( unittest.TestCase ):
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
lowercase_ : Union[str, Any] = [[1, 2, 4], [1, 2, 3, 4]]
lowercase_ : List[Any] = DisjunctiveConstraint(__UpperCamelCase )
self.assertTrue(isinstance(dc.token_ids ,__UpperCamelCase ) )
with self.assertRaises(__UpperCamelCase ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(__UpperCamelCase ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
lowercase_ : List[Any] = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(__UpperCamelCase ):
DisjunctiveConstraint(__UpperCamelCase ) # fails here
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
lowercase_ : Optional[int] = [[1, 2, 3], [1, 2, 4]]
lowercase_ : Dict = DisjunctiveConstraint(__UpperCamelCase )
lowercase_ , lowercase_ , lowercase_ : Union[str, Any] = dc.update(1 )
lowercase_ : str = stepped is True and completed is False and reset is False
self.assertTrue(__UpperCamelCase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
lowercase_ , lowercase_ , lowercase_ : Optional[Any] = dc.update(2 )
lowercase_ : Any = stepped is True and completed is False and reset is False
self.assertTrue(__UpperCamelCase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
lowercase_ , lowercase_ , lowercase_ : Tuple = dc.update(3 )
lowercase_ : Union[str, Any] = stepped is True and completed is True and reset is False
self.assertTrue(__UpperCamelCase )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
lowercase_ : List[str] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
lowercase_ : Union[str, Any] = DisjunctiveConstraint(__UpperCamelCase )
lowercase_ , lowercase_ , lowercase_ : Optional[int] = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
lowercase_ , lowercase_ , lowercase_ : int = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
lowercase_ , lowercase_ , lowercase_ : str = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
lowercase_ , lowercase_ , lowercase_ : List[str] = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
lowercase_ , lowercase_ , lowercase_ : Optional[int] = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
lowercase_ , lowercase_ , lowercase_ : int = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
lowercase_ , lowercase_ , lowercase_ : Dict = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
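# Rough sketch of the trie-like walk the tests above rely on: update() advances
# along any branch that matches, reports (stepped, completed, reset), and wipes
# progress on a mismatching token. Simplified relative to the real class.
class DisjunctiveSketch:
    def __init__(self, token_ids):
        self.token_ids = token_ids
        self.current_seq = []

    def update(self, token_id):
        candidate = self.current_seq + [token_id]
        if any(seq[: len(candidate)] == candidate for seq in self.token_ids):
            self.current_seq = candidate
            completed = any(seq == candidate for seq in self.token_ids)
            return True, completed, False   # stepped, completed, reset
        self.current_seq = []               # mismatch: progress is discarded
        return False, False, True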
| 321
| 0
|
"""simple docstring"""
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class UpperCamelCase :
def __init__( self ,__UpperCamelCase ,__UpperCamelCase=99 ,__UpperCamelCase=13 ,__UpperCamelCase=7 ,__UpperCamelCase=9 ,__UpperCamelCase=True ,__UpperCamelCase=True ,__UpperCamelCase=False ,__UpperCamelCase=32 ,__UpperCamelCase=5 ,__UpperCamelCase=4 ,__UpperCamelCase=37 ,__UpperCamelCase=8 ,__UpperCamelCase=0.1 ,__UpperCamelCase=0.002 ,__UpperCamelCase=1 ,__UpperCamelCase=0 ,__UpperCamelCase=0 ,__UpperCamelCase=None ,__UpperCamelCase=None ,) -> List[Any]:
'''simple docstring'''
lowercase_ : Optional[int] = parent
lowercase_ : Any = batch_size
lowercase_ : Union[str, Any] = encoder_seq_length
lowercase_ : str = decoder_seq_length
# For common tests
lowercase_ : str = self.decoder_seq_length
lowercase_ : Optional[int] = is_training
lowercase_ : Union[str, Any] = use_attention_mask
lowercase_ : Any = use_labels
lowercase_ : Optional[Any] = vocab_size
lowercase_ : Any = hidden_size
lowercase_ : Tuple = num_hidden_layers
lowercase_ : List[str] = num_attention_heads
lowercase_ : Optional[int] = d_ff
lowercase_ : Optional[Any] = relative_attention_num_buckets
lowercase_ : str = dropout_rate
lowercase_ : str = initializer_factor
lowercase_ : Optional[int] = eos_token_id
lowercase_ : List[str] = pad_token_id
lowercase_ : int = decoder_start_token_id
lowercase_ : Dict = None
lowercase_ : Dict = decoder_layers
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
return TaConfig.from_pretrained('google/umt5-base' )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase=None ,) -> Tuple:
'''simple docstring'''
if attention_mask is None:
lowercase_ : int = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
lowercase_ : Optional[int] = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
lowercase_ : Tuple = torch.ones(config.num_hidden_layers ,config.num_attention_heads ,device=_lowerCamelCase )
if decoder_head_mask is None:
lowercase_ : Dict = torch.ones(config.num_decoder_layers ,config.num_attention_heads ,device=_lowerCamelCase )
if cross_attn_head_mask is None:
lowercase_ : int = torch.ones(
config.num_decoder_layers ,config.num_attention_heads ,device=_lowerCamelCase )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def _UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : Optional[Any] = ids_tensor([self.batch_size, self.encoder_seq_length] ,self.vocab_size )
lowercase_ : str = ids_tensor([self.batch_size, self.decoder_seq_length] ,self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_length and which in turn results in
# position_ids being off by num_pad_tokens in past input
lowercase_ : Optional[Any] = input_ids.clamp(self.pad_token_id + 1 )
lowercase_ : Dict = decoder_input_ids.clamp(self.pad_token_id + 1 )
lowercase_ : Tuple = self.get_config()
lowercase_ : Any = config.num_attention_heads
lowercase_ : Union[str, Any] = self.prepare_inputs_dict(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
return config, input_dict
def _UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ , lowercase_ : Dict = self.prepare_config_and_inputs()
return config, inputs_dict
def _UpperCAmelCase ( self ) -> int:
'''simple docstring'''
return TaConfig(
vocab_size=166 ,d_model=self.hidden_size ,d_ff=self.d_ff ,d_kv=self.hidden_size // self.num_attention_heads ,num_layers=self.num_hidden_layers ,num_decoder_layers=self.decoder_layers ,num_heads=self.num_attention_heads ,relative_attention_num_buckets=self.relative_attention_num_buckets ,dropout_rate=self.dropout_rate ,initializer_factor=self.initializer_factor ,eos_token_id=self.eos_token_id ,bos_token_id=self.pad_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.decoder_start_token_id ,)
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
return TaConfig(
vocab_size=self.vocab_size ,d_model=self.hidden_size ,d_ff=self.d_ff ,d_kv=self.hidden_size // self.num_attention_heads ,num_layers=self.num_hidden_layers ,num_decoder_layers=self.decoder_layers ,num_heads=self.num_attention_heads ,relative_attention_num_buckets=self.relative_attention_num_buckets ,dropout_rate=self.dropout_rate ,initializer_factor=self.initializer_factor ,eos_token_id=self.eos_token_id ,bos_token_id=self.pad_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.decoder_start_token_id ,)
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,) -> Tuple:
'''simple docstring'''
lowercase_ : Union[str, Any] = UMTaModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
lowercase_ : Optional[int] = model(
input_ids=_lowerCamelCase ,decoder_input_ids=_lowerCamelCase ,attention_mask=_lowerCamelCase ,decoder_attention_mask=_lowerCamelCase ,)
lowercase_ : str = model(input_ids=_lowerCamelCase ,decoder_input_ids=_lowerCamelCase )
lowercase_ : Dict = result.last_hidden_state
lowercase_ : Dict = result.past_key_values
lowercase_ : Optional[int] = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() ,(self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() ,(self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(_lowerCamelCase ) ,config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) ,4 )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,) -> Dict:
'''simple docstring'''
lowercase_ : List[str] = UMTaModel(config=_lowerCamelCase ).get_decoder().to(_lowerCamelCase ).eval()
# first forward pass
lowercase_ : Tuple = model(_lowerCamelCase ,use_cache=_lowerCamelCase )
lowercase_ : List[str] = model(_lowerCamelCase )
lowercase_ : Tuple = model(_lowerCamelCase ,use_cache=_lowerCamelCase )
self.parent.assertTrue(len(_lowerCamelCase ) == len(_lowerCamelCase ) )
self.parent.assertTrue(len(_lowerCamelCase ) == len(_lowerCamelCase ) + 1 )
lowercase_ , lowercase_ : List[str] = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
lowercase_ : str = ids_tensor((self.batch_size, 1) ,config.vocab_size )
# append to next input_ids and
lowercase_ : Tuple = torch.cat([input_ids, next_tokens] ,dim=-1 )
lowercase_ : Union[str, Any] = model(_lowerCamelCase )['last_hidden_state']
lowercase_ : List[str] = model(_lowerCamelCase ,past_key_values=_lowerCamelCase )['last_hidden_state']
# select random slice
lowercase_ : Dict = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
lowercase_ : Optional[int] = output_from_no_past[:, -1, random_slice_idx].detach()
lowercase_ : Dict = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_lowerCamelCase ,_lowerCamelCase ,atol=1e-3 ) )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,) -> Dict:
'''simple docstring'''
lowercase_ : Optional[int] = UMTaModel(config=_lowerCamelCase ).to(_lowerCamelCase ).half().eval()
lowercase_ : Optional[Any] = model(**_lowerCamelCase )['last_hidden_state']
self.parent.assertFalse(torch.isnan(_lowerCamelCase ).any().item() )
@require_torch
class UpperCamelCase ( A_ , A_ , A_ , unittest.TestCase ):
lowercase = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
lowercase = (UMTaForConditionalGeneration,) if is_torch_available() else ()
lowercase = (
{
'conversational': UMTaForConditionalGeneration,
'feature-extraction': UMTaModel,
'summarization': UMTaForConditionalGeneration,
'text2text-generation': UMTaForConditionalGeneration,
'translation': UMTaForConditionalGeneration,
'question-answering': UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
lowercase = True
lowercase = False
lowercase = False
lowercase = True
lowercase = True
# The small UMT5 model needs higher percentages for CPU/MP tests
lowercase = [0.8, 0.9]
def _UpperCAmelCase ( self ) -> int:
'''simple docstring'''
lowercase_ : List[str] = UMTaModelTester(self )
@unittest.skip('Test has a segmentation fault on torch 1.8.0' )
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
lowercase_ : int = self.model_tester.prepare_config_and_inputs()
lowercase_ : int = UMTaModel(config_and_inputs[0] ).to(_lowerCamelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
_lowerCamelCase ,(config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) ,f'''{tmpdirname}/t5_test.onnx''' ,export_params=_lowerCamelCase ,opset_version=9 ,input_names=['input_ids', 'decoder_input_ids'] ,)
@unittest.skipIf(torch_device == 'cpu' ,'Cant do half precision' )
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*_lowerCamelCase )
def _UpperCAmelCase ( self ) -> int:
'''simple docstring'''
lowercase_ : Optional[Any] = ['encoder_attentions', 'decoder_attentions', 'cross_attentions']
lowercase_ : Dict = self.model_tester.prepare_config_and_inputs()
lowercase_ : Any = config_and_inputs[0]
lowercase_ : str = UMTaForConditionalGeneration(_lowerCamelCase ).eval()
model.to(_lowerCamelCase )
lowercase_ : Optional[Any] = {
'head_mask': torch.zeros(config.num_layers ,config.num_heads ,device=_lowerCamelCase ),
'decoder_head_mask': torch.zeros(config.num_decoder_layers ,config.num_heads ,device=_lowerCamelCase ),
'cross_attn_head_mask': torch.zeros(config.num_decoder_layers ,config.num_heads ,device=_lowerCamelCase ),
}
for attn_name, (name, mask) in zip(_lowerCamelCase ,head_masking.items() ):
lowercase_ : Any = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
lowercase_ : int = torch.ones(
config.num_decoder_layers ,config.num_heads ,device=_lowerCamelCase )
lowercase_ : List[Any] = model.generate(
config_and_inputs[1]['input_ids'] ,num_beams=1 ,max_length=3 ,output_attentions=_lowerCamelCase ,return_dict_in_generate=_lowerCamelCase ,**_lowerCamelCase ,)
# We check the state of decoder_attentions and cross_attentions just from the last step
lowercase_ : Union[str, Any] = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) ,0.0 )
@unittest.skip('Does not work on the tiny model as we keep hitting edge cases.' )
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCamelCase ( unittest.TestCase ):
@slow
@unittest.skip(
'Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged' )
def _UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
lowercase_ : Tuple = UMTaForConditionalGeneration.from_pretrained('google/umt5-small' ,return_dict=_lowerCamelCase ).to(_lowerCamelCase )
lowercase_ : int = AutoTokenizer.from_pretrained('google/umt5-small' ,use_fast=_lowerCamelCase ,legacy=_lowerCamelCase )
lowercase_ : Dict = [
'Bonjour monsieur <extra_id_0> bien <extra_id_1>.',
'No se como puedo <extra_id_0>.',
'This is the reason why we <extra_id_0> them.',
'The <extra_id_0> walks in <extra_id_1>, seats',
'A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.',
]
lowercase_ : Optional[Any] = tokenizer(_lowerCamelCase ,return_tensors='pt' ,padding=_lowerCamelCase ).input_ids
# fmt: off
lowercase_ : List[str] = torch.tensor(
[
[ 3_8530, 21_0703, 25_6299, 1410, 25_6298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 826, 321, 671, 2_5922, 25_6299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 1460, 339, 312, 1_9014, 1_0620, 758, 25_6299, 2355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[ 517, 25_6299, 1_4869, 281, 301, 25_6298, 275, 11_9983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 320, 25_6299, 1_4869, 281, 2234, 289, 2275, 333, 6_1391, 289, 25_6298, 543, 25_6297, 16_8714, 329, 25_6296, 274, 1],
] )
# fmt: on
torch.testing.assert_allclose(_lowerCamelCase ,_lowerCamelCase )
lowercase_ : Tuple = model.generate(input_ids.to(_lowerCamelCase ) )
lowercase_ : Tuple = [
'<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>',
'<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
]
lowercase_ : Tuple = tokenizer.batch_decode(_lowerCamelCase )
self.assertEqual(_lowerCamelCase ,_lowerCamelCase )
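# Hedged sketch of the use_cache check performed earlier in this file: a cached
# forward pass over only the new token must match the corresponding slice of a
# full-sequence pass. The helper name and the 'model' interface are assumptions
# (any HF decoder returning last_hidden_state / past_key_values would do).
import torch

def check_cache_equivalence(model, input_ids, next_tokens, atol=1e-3):
    full_ids = torch.cat([input_ids, next_tokens], dim=-1)
    out_no_past = model(full_ids)['last_hidden_state']
    past = model(input_ids, use_cache=True).past_key_values
    out_past = model(next_tokens, past_key_values=past)['last_hidden_state']
    return torch.allclose(out_no_past[:, -1], out_past[:, 0], atol=atol)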
| 368
|
"""simple docstring"""
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_checkpoint_to_pytorch(__SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str ):
def get_masked_lm_array(__SCREAMING_SNAKE_CASE : str ):
lowercase_ : int = F'''masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
lowercase_ : str = tf.train.load_variable(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if "kernel" in name:
lowercase_ : List[Any] = array.transpose()
return torch.from_numpy(__SCREAMING_SNAKE_CASE )
def get_encoder_array(__SCREAMING_SNAKE_CASE : str ):
lowercase_ : Tuple = F'''encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
lowercase_ : Optional[Any] = tf.train.load_variable(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if "kernel" in name:
lowercase_ : Tuple = array.transpose()
return torch.from_numpy(__SCREAMING_SNAKE_CASE )
def get_encoder_layer_array(__SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str ):
lowercase_ : List[Any] = F'''encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
lowercase_ : List[Any] = tf.train.load_variable(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if "kernel" in name:
lowercase_ : List[str] = array.transpose()
return torch.from_numpy(__SCREAMING_SNAKE_CASE )
def get_encoder_attention_layer_array(__SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[Any] ):
lowercase_ : List[Any] = F'''encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
lowercase_ : Optional[Any] = tf.train.load_variable(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = array.reshape(__SCREAMING_SNAKE_CASE )
if "kernel" in name:
lowercase_ : List[str] = array.transpose()
return torch.from_numpy(__SCREAMING_SNAKE_CASE )
print(F'''Loading model based on config from {config_path}...''' )
lowercase_ : Any = BertConfig.from_json_file(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = BertForMaskedLM(__SCREAMING_SNAKE_CASE )
# Layers
for layer_index in range(0 , config.num_hidden_layers ):
lowercase_ : BertLayer = model.bert.encoder.layer[layer_index]
# Self-attention
lowercase_ : BertSelfAttention = layer.attention.self
lowercase_ : str = get_encoder_attention_layer_array(
__SCREAMING_SNAKE_CASE , '_query_dense/kernel' , self_attn.query.weight.data.shape )
lowercase_ : Tuple = get_encoder_attention_layer_array(
__SCREAMING_SNAKE_CASE , '_query_dense/bias' , self_attn.query.bias.data.shape )
lowercase_ : Tuple = get_encoder_attention_layer_array(
__SCREAMING_SNAKE_CASE , '_key_dense/kernel' , self_attn.key.weight.data.shape )
lowercase_ : int = get_encoder_attention_layer_array(
__SCREAMING_SNAKE_CASE , '_key_dense/bias' , self_attn.key.bias.data.shape )
lowercase_ : Dict = get_encoder_attention_layer_array(
__SCREAMING_SNAKE_CASE , '_value_dense/kernel' , self_attn.value.weight.data.shape )
lowercase_ : List[Any] = get_encoder_attention_layer_array(
__SCREAMING_SNAKE_CASE , '_value_dense/bias' , self_attn.value.bias.data.shape )
# Self-attention Output
lowercase_ : BertSelfOutput = layer.attention.output
lowercase_ : Dict = get_encoder_attention_layer_array(
__SCREAMING_SNAKE_CASE , '_output_dense/kernel' , self_output.dense.weight.data.shape )
lowercase_ : Any = get_encoder_attention_layer_array(
__SCREAMING_SNAKE_CASE , '_output_dense/bias' , self_output.dense.bias.data.shape )
lowercase_ : Tuple = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_attention_layer_norm/gamma' )
lowercase_ : Any = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_attention_layer_norm/beta' )
# Intermediate
lowercase_ : BertIntermediate = layer.intermediate
lowercase_ : Optional[Any] = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_intermediate_dense/kernel' )
lowercase_ : Optional[int] = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_intermediate_dense/bias' )
# Output
lowercase_ : BertOutput = layer.output
lowercase_ : Any = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_output_dense/kernel' )
lowercase_ : Optional[Any] = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_output_dense/bias' )
lowercase_ : List[str] = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_output_layer_norm/gamma' )
lowercase_ : int = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_output_layer_norm/beta' )
# Embeddings
lowercase_ : Optional[Any] = get_encoder_array('_position_embedding_layer/embeddings' )
lowercase_ : int = get_encoder_array('_type_embedding_layer/embeddings' )
lowercase_ : Any = get_encoder_array('_embedding_norm_layer/gamma' )
lowercase_ : Optional[Any] = get_encoder_array('_embedding_norm_layer/beta' )
# LM Head
lowercase_ : int = model.cls.predictions.transform
lowercase_ : str = get_masked_lm_array('dense/kernel' )
lowercase_ : Optional[Any] = get_masked_lm_array('dense/bias' )
lowercase_ : Optional[Any] = get_masked_lm_array('layer_norm/gamma' )
lowercase_ : Optional[int] = get_masked_lm_array('layer_norm/beta' )
lowercase_ : List[str] = get_masked_lm_array('embedding_table' )
# Pooling
lowercase_ : Optional[Any] = BertPooler(config=__SCREAMING_SNAKE_CASE )
lowercase_ : BertPooler = get_encoder_array('_pooler_layer/kernel' )
lowercase_ : BertPooler = get_encoder_array('_pooler_layer/bias' )
# Export final model
model.save_pretrained(__SCREAMING_SNAKE_CASE )
# Integration test - should load without any errors ;)
lowercase_ : Tuple = BertForMaskedLM.from_pretrained(__SCREAMING_SNAKE_CASE )
print(new_model.eval() )
    print('Model conversion was done successfully!' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow Token Dropping checkpoint path."
)
parser.add_argument(
"--bert_config_file",
type=str,
required=True,
help="The config json file corresponding to the BERT model. This specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path",
type=str,
required=True,
help="Path to the output PyTorch model.",
)
    args = parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
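# Why every "kernel" above is transposed: TF dense layers store weights as
# (in_features, out_features) while torch.nn.Linear expects (out_features,
# in_features). A self-contained sketch of the round trip:
import numpy as np
import torch

tf_kernel = np.random.rand(768, 3072)          # TF layout: (in, out)
pt_weight = torch.from_numpy(tf_kernel.T)      # PyTorch layout: (out, in)
assert pt_weight.shape == (3072, 768)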
| 321
| 0
|
"""simple docstring"""
class TrieNode:
    def __init__(self) -> None:
        self.nodes: dict[str, TrieNode] = {}  # Mapping from char to TrieNode
        self.is_leaf = False

    def insert_many(self, words: list[str]) -> None:
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word: str) -> None:
        def _delete(curr: TrieNode, word: str, index: int) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)


def print_words(node: TrieNode, word: str) -> None:
    if node.is_leaf:
        print(word, end=' ')
    for key, value in node.nodes.items():
        print_words(value, word + key)


def test_trie() -> bool:
    words = 'banana bananas bandana band apple all beast'.split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find('banana')
    assert not root.find('bandanas')
    assert not root.find('apps')
    assert root.find('apple')
    assert root.find('all')
    root.delete('all')
    assert not root.find('all')
    root.delete('banana')
    assert not root.find('banana')
    assert root.find('bananas')
    return True


def pytests() -> None:
    assert test_trie()


def print_results(msg: str, passes: bool) -> None:
    print(str(msg), 'works!' if passes else "doesn't work :(")


def main() -> None:
    print_results('Testing trie functionality', test_trie())
if __name__ == "__main__":
main()
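# A small extension sketch: collect every completion of a prefix by walking to
# the prefix node and reusing the leaf-enumeration idea from print_words.
def autocomplete(root: TrieNode, prefix: str) -> list[str]:
    curr = root
    for char in prefix:
        if char not in curr.nodes:
            return []
        curr = curr.nodes[char]
    results: list[str] = []

    def collect(node: TrieNode, word: str) -> None:
        if node.is_leaf:
            results.append(word)
        for key, child in node.nodes.items():
            collect(child, word + key)

    collect(curr, prefix)
    return results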
| 369
|
"""simple docstring"""
from collections import namedtuple
import requests
from lxml import html # type: ignore
covid_data = namedtuple("covid_data", "cases deaths recovered")
def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))
fmt = "Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}"
print(fmt.format(*covid_stats()))
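# A guarded variant sketch: the xpath yields the three counters in page order
# (cases, deaths, recovered), which covid_data(*...) silently depends on, so a
# defensive version should verify the count before unpacking.
def covid_stats_safe(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    values = html.fromstring(requests.get(url, timeout=10).content).xpath(
        '//div[@class = "maincounter-number"]/span/text()')
    if len(values) != 3:  # page layout changed
        raise RuntimeError('unexpected page structure')
    return covid_data(*(v.strip() for v in values))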
| 321
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_unispeech"] = [
"UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST",
"UniSpeechForCTC",
"UniSpeechForPreTraining",
"UniSpeechForSequenceClassification",
"UniSpeechModel",
"UniSpeechPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
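# Sketch of the lazy-import pattern _LazyModule implements: attribute access
# triggers the real submodule import, keeping "import transformers" cheap. This
# is a simplified illustration, not the actual transformers implementation.
import importlib
import types

class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f'.{submodule}', self.__name__)
                return getattr(module, attr)
        raise AttributeError(attr)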
| 370
|
"""simple docstring"""
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 321
| 0
|
"""simple docstring"""
from __future__ import annotations
def encode(plain: str) -> list[int]:
    return [ord(elem) - 96 for elem in plain]
def decode(encoded: list[int]) -> str:
    return "".join(chr(elem + 96) for elem in encoded)
def main() -> None:
    encoded = encode(input('-> ').strip().lower())
    print('Encoded: ', encoded)
    print('Decoded:', decode(encoded))
if __name__ == "__main__":
main()
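# Worked example for the A1Z26 mapping above ('a' -> 1, ..., 'z' -> 26):
assert encode('hello') == [8, 5, 12, 12, 15]
assert decode([8, 5, 12, 12, 15]) == 'hello'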
| 371
|
"""simple docstring"""
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class UpperCamelCase :
def __init__( self ,__UpperCamelCase ,__UpperCamelCase=13 ,__UpperCamelCase=7 ,__UpperCamelCase=False ,__UpperCamelCase=True ,__UpperCamelCase=False ,__UpperCamelCase=True ,__UpperCamelCase=33 ,__UpperCamelCase=32 ,__UpperCamelCase=5 ,__UpperCamelCase=4 ,__UpperCamelCase=37 ,__UpperCamelCase="gelu" ,__UpperCamelCase=0.1 ,__UpperCamelCase=0.1 ,__UpperCamelCase=512 ,__UpperCamelCase=16 ,__UpperCamelCase=2 ,__UpperCamelCase=0.02 ,__UpperCamelCase=3 ,__UpperCamelCase=4 ,__UpperCamelCase=None ,) -> List[Any]:
'''simple docstring'''
lowercase_ : Any = parent
lowercase_ : str = batch_size
lowercase_ : List[Any] = seq_length
lowercase_ : Dict = is_training
lowercase_ : Tuple = use_input_mask
lowercase_ : Optional[Any] = use_token_type_ids
lowercase_ : List[str] = use_labels
lowercase_ : Any = vocab_size
lowercase_ : List[str] = hidden_size
lowercase_ : Optional[int] = num_hidden_layers
lowercase_ : int = num_attention_heads
lowercase_ : int = intermediate_size
lowercase_ : List[Any] = hidden_act
lowercase_ : Optional[int] = hidden_dropout_prob
lowercase_ : Tuple = attention_probs_dropout_prob
lowercase_ : Tuple = max_position_embeddings
lowercase_ : Optional[int] = type_vocab_size
lowercase_ : Optional[int] = type_sequence_label_size
lowercase_ : Dict = initializer_range
lowercase_ : int = num_labels
lowercase_ : Any = num_choices
lowercase_ : int = scope
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
lowercase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
lowercase_ : Dict = None
if self.use_input_mask:
lowercase_ : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowercase_ : Tuple = None
lowercase_ : Tuple = None
lowercase_ : Tuple = None
if self.use_labels:
lowercase_ : List[Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
lowercase_ : str = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
lowercase_ : int = ids_tensor([self.batch_size] ,self.num_choices )
lowercase_ : str = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
return EsmConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,pad_token_id=1 ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,)
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Tuple:
'''simple docstring'''
lowercase_ : List[Any] = EsmModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowercase_ : Tuple = model(__UpperCamelCase ,attention_mask=__UpperCamelCase )
lowercase_ : Union[str, Any] = model(__UpperCamelCase )
lowercase_ : int = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : Dict = EsmForMaskedLM(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowercase_ : int = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ,labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : str = self.num_labels
lowercase_ : int = EsmForTokenClassification(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowercase_ : List[Any] = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ,labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
lowercase_ : Any = self.prepare_config_and_inputs()
        (
            lowercase_ ,
            lowercase_ ,
            lowercase_ ,
            lowercase_ ,
            lowercase_ ,
            lowercase_ ,
        ) = config_and_inputs
lowercase_ : Dict = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase ( lowercase_ , lowercase_ , unittest.TestCase ):
lowercase = False
lowercase = (
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
lowercase = ()
lowercase = (
{
'feature-extraction': EsmModel,
'fill-mask': EsmForMaskedLM,
'text-classification': EsmForSequenceClassification,
'token-classification': EsmForTokenClassification,
'zero-shot': EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase = True
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
lowercase_ : Dict = EsmModelTester(self )
lowercase_ : List[Any] = ConfigTester(self ,config_class=__UpperCamelCase ,hidden_size=37 )
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowercase_ : Optional[Any] = type
self.model_tester.create_and_check_model(*__UpperCamelCase )
def _UpperCAmelCase ( self ) -> int:
'''simple docstring'''
lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__UpperCamelCase )
def _UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__UpperCamelCase )
@slow
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ : List[str] = EsmModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def _UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()[0]
lowercase_ : str = EsmEmbeddings(config=__UpperCamelCase )
lowercase_ : Tuple = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
lowercase_ : List[Any] = torch.as_tensor(
[
[
0 + model.padding_idx + 1,
1 + model.padding_idx + 1,
2 + model.padding_idx + 1,
model.padding_idx,
]
] )
lowercase_ : Tuple = create_position_ids_from_input_ids(__UpperCamelCase ,model.padding_idx )
self.assertEqual(position_ids.shape ,expected_positions.shape )
self.assertTrue(torch.all(torch.eq(__UpperCamelCase ,__UpperCamelCase ) ) )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs()[0]
lowercase_ : List[Any] = EsmEmbeddings(config=__UpperCamelCase )
lowercase_ : List[Any] = torch.empty(2 ,4 ,30 )
lowercase_ : List[str] = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
lowercase_ : List[str] = torch.as_tensor([expected_single_positions, expected_single_positions] )
lowercase_ : List[str] = embeddings.create_position_ids_from_inputs_embeds(__UpperCamelCase )
self.assertEqual(position_ids.shape ,expected_positions.shape )
self.assertTrue(torch.all(torch.eq(__UpperCamelCase ,__UpperCamelCase ) ) )
@unittest.skip('Esm does not support embedding resizing' )
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
pass
@unittest.skip('Esm does not support embedding resizing' )
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
pass
@require_torch
class UpperCamelCase ( lowercase_ ):
@slow
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
with torch.no_grad():
lowercase_ : Any = EsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' )
model.eval()
lowercase_ : List[Any] = torch.tensor([[0, 1, 2, 3, 4, 5]] )
lowercase_ : List[str] = model(__UpperCamelCase )[0]
lowercase_ : Optional[int] = 33
lowercase_ : Union[str, Any] = torch.Size((1, 6, vocab_size) )
self.assertEqual(output.shape ,__UpperCamelCase )
lowercase_ : List[str] = torch.tensor(
[[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) )
@slow
def _UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
with torch.no_grad():
lowercase_ : int = EsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' )
model.eval()
lowercase_ : Tuple = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
lowercase_ : Dict = model(__UpperCamelCase )[0]
# compare the actual values for a slice.
lowercase_ : Any = torch.tensor(
[[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) )
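# Sketch of the position-id rule the embedding tests above assert: non-pad
# tokens are numbered padding_idx + 1, padding_idx + 2, ... while pad tokens
# keep padding_idx itself. Close in spirit to create_position_ids_from_input_ids.
import torch

def create_position_ids_sketch(input_ids: torch.Tensor, padding_idx: int) -> torch.Tensor:
    mask = input_ids.ne(padding_idx).int()
    return torch.cumsum(mask, dim=1) * mask + padding_idx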
| 321
| 0
|
"""simple docstring"""
from datetime import datetime
import requests
def download_video(url: str) -> bytes:
    base_url = 'https://downloadgram.net/wp-json/wppress/video-downloader/video?url='
    video_src = requests.get(base_url + url).json()[0]['urls'][0]['src']
    return requests.get(video_src).content
if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
    with open(file_name, "wb") as fp:
        fp.write(download_video(url))
print(f"Done. Video saved to disk as {file_name}.")
| 350
|
"""simple docstring"""
import pickle
import numpy as np
from matplotlib import pyplot as plt
class UpperCamelCase :
def __init__( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=0.2 ,__UpperCamelCase=0.2 ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : Optional[int] = bp_numa
lowercase_ : Dict = bp_numa
lowercase_ : Tuple = bp_numa
lowercase_ : List[Any] = conva_get[:2]
lowercase_ : int = conva_get[2]
lowercase_ : Dict = size_pa
lowercase_ : int = rate_w
lowercase_ : Union[str, Any] = rate_t
lowercase_ : Dict = [
np.mat(-1 * np.random.rand(self.conva[0] ,self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
lowercase_ : Union[str, Any] = np.mat(-1 * np.random.rand(self.num_bpa ,self.num_bpa ) + 0.5 )
lowercase_ : Union[str, Any] = np.mat(-1 * np.random.rand(self.num_bpa ,self.num_bpa ) + 0.5 )
lowercase_ : str = -2 * np.random.rand(self.conva[1] ) + 1
lowercase_ : Tuple = -2 * np.random.rand(self.num_bpa ) + 1
lowercase_ : Union[str, Any] = -2 * np.random.rand(self.num_bpa ) + 1
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
lowercase_ : int = {
'num_bp1': self.num_bpa,
'num_bp2': self.num_bpa,
'num_bp3': self.num_bpa,
'conv1': self.conva,
'step_conv1': self.step_conva,
'size_pooling1': self.size_poolinga,
'rate_weight': self.rate_weight,
'rate_thre': self.rate_thre,
'w_conv1': self.w_conva,
'wkj': self.wkj,
'vji': self.vji,
'thre_conv1': self.thre_conva,
'thre_bp2': self.thre_bpa,
'thre_bp3': self.thre_bpa,
}
with open(__UpperCamelCase ,'wb' ) as f:
pickle.dump(__UpperCamelCase ,__UpperCamelCase )
print(f'''Model saved: {save_path}''' )
@classmethod
def _UpperCAmelCase ( cls ,__UpperCamelCase ) -> List[Any]:
'''simple docstring'''
with open(__UpperCamelCase ,'rb' ) as f:
lowercase_ : Any = pickle.load(__UpperCamelCase ) # noqa: S301
lowercase_ : str = model_dic.get('conv1' )
conv_get.append(model_dic.get('step_conv1' ) )
lowercase_ : Union[str, Any] = model_dic.get('size_pooling1' )
lowercase_ : Optional[Any] = model_dic.get('num_bp1' )
lowercase_ : str = model_dic.get('num_bp2' )
lowercase_ : Optional[Any] = model_dic.get('num_bp3' )
lowercase_ : Union[str, Any] = model_dic.get('rate_weight' )
lowercase_ : Optional[int] = model_dic.get('rate_thre' )
# create model instance
lowercase_ : Any = CNN(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
# modify model parameter
lowercase_ : Optional[Any] = model_dic.get('w_conv1' )
lowercase_ : Tuple = model_dic.get('wkj' )
lowercase_ : Union[str, Any] = model_dic.get('vji' )
lowercase_ : Optional[Any] = model_dic.get('thre_conv1' )
lowercase_ : Dict = model_dic.get('thre_bp2' )
lowercase_ : Optional[int] = model_dic.get('thre_bp3' )
return conv_ins
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Any:
'''simple docstring'''
return 1 / (1 + np.exp(-1 * x ))
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
return round(__UpperCamelCase ,3 )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Tuple:
'''simple docstring'''
lowercase_ : Dict = convs[0]
lowercase_ : Any = convs[1]
lowercase_ : Optional[Any] = np.shape(__UpperCamelCase )[0]
# get the data slice of original image data, data_focus
lowercase_ : Tuple = []
for i_focus in range(0 ,size_data - size_conv + 1 ,__UpperCamelCase ):
for j_focus in range(0 ,size_data - size_conv + 1 ,__UpperCamelCase ):
lowercase_ : List[Any] = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(__UpperCamelCase )
# calculate the feature map of every single kernel, and saved as list of matrix
lowercase_ : Dict = []
lowercase_ : Dict = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(__UpperCamelCase ):
lowercase_ : Tuple = []
for i_focus in range(len(__UpperCamelCase ) ):
lowercase_ : Optional[int] = (
np.sum(np.multiply(data_focus[i_focus] ,w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(__UpperCamelCase ) )
lowercase_ : Optional[int] = np.asmatrix(__UpperCamelCase ).reshape(
__UpperCamelCase ,__UpperCamelCase )
data_featuremap.append(__UpperCamelCase )
        # expanding the data slice to one dimension
lowercase_ : Optional[int] = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(__UpperCamelCase ) )
lowercase_ : str = np.asarray(__UpperCamelCase )
return focus_list, data_featuremap
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase="average_pool" ) -> Tuple:
'''simple docstring'''
lowercase_ : Union[str, Any] = len(featuremaps[0] )
lowercase_ : str = int(size_map / size_pooling )
lowercase_ : Optional[int] = []
for i_map in range(len(__UpperCamelCase ) ):
lowercase_ : int = featuremaps[i_map]
lowercase_ : List[str] = []
for i_focus in range(0 ,__UpperCamelCase ,__UpperCamelCase ):
for j_focus in range(0 ,__UpperCamelCase ,__UpperCamelCase ):
lowercase_ : List[str] = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(__UpperCamelCase ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(__UpperCamelCase ) )
lowercase_ : Dict = np.asmatrix(__UpperCamelCase ).reshape(__UpperCamelCase ,__UpperCamelCase )
featuremap_pooled.append(__UpperCamelCase )
return featuremap_pooled
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Any:
'''simple docstring'''
lowercase_ : Tuple = []
for i in range(len(__UpperCamelCase ) ):
lowercase_ : Optional[Any] = np.shape(data[i] )
lowercase_ : List[str] = data[i].reshape(1 ,shapes[0] * shapes[1] )
lowercase_ : List[str] = data_listed.getA().tolist()[0]
data_expanded.extend(__UpperCamelCase )
lowercase_ : int = np.asarray(__UpperCamelCase )
return data_expanded
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> int:
'''simple docstring'''
lowercase_ : Any = np.asarray(__UpperCamelCase )
lowercase_ : Any = np.shape(__UpperCamelCase )
lowercase_ : Optional[Any] = data_mat.reshape(1 ,shapes[0] * shapes[1] )
return data_expanded
    def _calculate_gradient_from_pool( self ,out_map ,pd_pool ,num_map ,size_map ,size_pooling ):
        """Upsample the pooled gradients back to feature-map size and apply the sigmoid derivative."""
        pd_all = []
        i_pool = 0
        for i_map in range(num_map ):
            pd_conv = np.ones((size_map, size_map) )
            for i in range(0 ,size_map ,size_pooling ):
                for j in range(0 ,size_map ,size_pooling ):
                    pd_conv[i : i + size_pooling, j : j + size_pooling] = pd_pool[
                        i_pool
                    ]
                    i_pool = i_pool + 1
            pd_conv = np.multiply(
                pd_conv ,np.multiply(out_map[i_map] ,(1 - out_map[i_map]) ) )
            pd_all.append(pd_conv )
        return pd_all
    def train( self ,patterns ,datas_train ,datas_teach ,n_repeat ,error_accuracy ,draw_e=True ):
        """Train the network until the error threshold or the repeat limit is reached."""
        print('----------------------Start Training-------------------------' )
        print((' - - Shape: Train_Data ', np.shape(datas_train )) )
        print((' - - Shape: Teach_Data ', np.shape(datas_teach )) )
        rp = 0
        all_mse = []
        mse = 10000
        while rp < n_repeat and mse >= error_accuracy:
            error_count = 0
            print(f'''-------------Learning Time {rp}--------------''' )
            for p in range(len(datas_train ) ):
                # print('------------Learning Image: %d--------------'%p)
                data_train = np.asmatrix(datas_train[p] )
                data_teach = np.asarray(datas_teach[p] )
                data_focus, data_conved = self.convolute(
                    data_train ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,)
                data_pooled = self.pooling(data_conved ,self.size_poolinga )
                shape_featuremap = np.shape(data_conved )
                data_bp_input = self._expand(data_pooled )
                bp_out1 = data_bp_input
                bp_net_j = np.dot(bp_out1 ,self.vji.T ) - self.thre_bpa
                bp_out2 = self.sig(bp_net_j )
                bp_net_k = np.dot(bp_out2 ,self.wkj.T ) - self.thre_bpa
                bp_out3 = self.sig(bp_net_k )
                # --------------Model Learning ------------------------
                # calculate error and gradient---------------
                pd_k_all = np.multiply(
                    (data_teach - bp_out3) ,np.multiply(bp_out3 ,(1 - bp_out3) ) )
                pd_j_all = np.multiply(
                    np.dot(pd_k_all ,self.wkj ) ,np.multiply(bp_out2 ,(1 - bp_out2) ) )
                pd_i_all = np.dot(pd_j_all ,self.vji )
                pd_conv_pooled = pd_i_all / (self.size_poolinga * self.size_poolinga)
                pd_conv_pooled = pd_conv_pooled.T.getA().tolist()
                pd_conv_all = self._calculate_gradient_from_pool(
                    data_conved ,pd_conv_pooled ,shape_featuremap[0] ,shape_featuremap[1] ,self.size_poolinga ,)
                # weight and threshold learning process---------
                # convolution layer
                for k_conv in range(self.conva[1] ):
                    pd_conv_list = self._expand_mat(pd_conv_all[k_conv] )
                    delta_w = self.rate_weight * np.dot(pd_conv_list ,data_focus )
                    self.w_conva[k_conv] = self.w_conva[k_conv] + delta_w.reshape(
                        (self.conva[0], self.conva[0]) )
                    self.thre_conva[k_conv] = (
                        self.thre_conva[k_conv]
                        - np.sum(pd_conv_all[k_conv] ) * self.rate_thre
                    )
                # fully connected layer
                self.wkj = self.wkj + pd_k_all.T * bp_out2 * self.rate_weight
                self.vji = self.vji + pd_j_all.T * bp_out1 * self.rate_weight
                self.thre_bpa = self.thre_bpa - pd_k_all * self.rate_thre
                self.thre_bpa = self.thre_bpa - pd_j_all * self.rate_thre
                # accumulate the summed error of this single image
                errors = np.sum(abs(data_teach - bp_out3 ) )
                error_count += errors
                # print(' ----Teach ',data_teach)
                # print(' ----BP_output ',bp_out3)
            rp = rp + 1
            mse = error_count / patterns
            all_mse.append(mse )
        def draw_error():
            yplot = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
            plt.plot(all_mse ,'+-' )
            plt.plot(yplot ,'r--' )
            plt.xlabel('Learning Times' )
            plt.ylabel('All_mse' )
            plt.grid(True ,alpha=0.5 )
            plt.show()
        print('------------------Training Completed---------------------' )
        print((' - - Training epoch: ', rp, f''' - - Mse: {mse:.6f}''') )
        if draw_e:
            draw_error()
        return mse
    def predict( self ,datas_test ):
        """Run a forward pass over the test data and round the outputs."""
        produce_out = []
        print('-------------------Start Testing-------------------------' )
        print((' - - Shape: Test_Data ', np.shape(datas_test )) )
        for p in range(len(datas_test ) ):
            data_test = np.asmatrix(datas_test[p] )
            data_focus, data_conved = self.convolute(
                data_test ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,)
            data_pooled = self.pooling(data_conved ,self.size_poolinga )
            data_bp_input = self._expand(data_pooled )
            bp_out1 = data_bp_input
            bp_net_j = bp_out1 * self.vji.T - self.thre_bpa
            bp_out2 = self.sig(bp_net_j )
            bp_net_k = bp_out2 * self.wkj.T - self.thre_bpa
            bp_out3 = self.sig(bp_net_k )
            produce_out.extend(bp_out3.getA().tolist() )
        res = [list(map(self.do_round ,each ) ) for each in produce_out]
        return np.asarray(res )
    def convolution( self ,data ):
        """Return the convolved and pooled data so the intermediate maps can be inspected."""
        data_test = np.asmatrix(data )
        data_focus, data_conved = self.convolute(
            data_test ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,)
        data_pooled = self.pooling(data_conved ,self.size_poolinga )
        return data_conved, data_pooled
if __name__ == "__main__":
pass
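# A standalone sketch (added for illustration; not part of the original module)
# of the valid convolution + sigmoid + non-overlapping average pooling that
# `convolute` and `pooling` implement above, written against plain numpy
# (assumes `np` is imported at the top of this file, as the class already uses it):
def _demo_conv2d_valid(image ,kernel ,step=1 ):
    """Slide `kernel` over `image` with stride `step`, then apply the sigmoid."""
    k = kernel.shape[0]
    n = (image.shape[0] - k) // step + 1
    out = np.empty((n, n) )
    for i in range(n ):
        for j in range(n ):
            patch = image[i * step : i * step + k, j * step : j * step + k]
            out[i, j] = np.sum(patch * kernel )
    return 1 / (1 + np.exp(-out ) )
def _demo_avg_pool(fmap ,size ):
    """Average every non-overlapping `size` x `size` block of `fmap`."""
    n = fmap.shape[0] // size
    return fmap[: n * size, : n * size].reshape(n ,size ,n ,size ).mean(axis=(1, 3) )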
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_bigbird_pegasus""": [
"""BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BigBirdPegasusConfig""",
"""BigBirdPegasusOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
"""BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BigBirdPegasusForCausalLM""",
"""BigBirdPegasusForConditionalGeneration""",
"""BigBirdPegasusForQuestionAnswering""",
"""BigBirdPegasusForSequenceClassification""",
"""BigBirdPegasusModel""",
"""BigBirdPegasusPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
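if __name__ == "__main__":
    # Illustrative only (added; not part of the original __init__): with the lazy
    # structure above, importing the package stays cheap, and the torch-backed
    # modeling module is only loaded on first attribute access (assumes torch and
    # an installed transformers are available).
    import transformers

    config = transformers.BigBirdPegasusConfig()      # triggers the configuration import
    model = transformers.BigBirdPegasusModel(config)  # triggers the modeling import
    print(type(model).__name__)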
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class UpperCamelCase ( unittest.TestCase ):
    def check_results_dict_not_empty( self ,results ):
        """Assert that every (batch_size, sequence_length) cell in the results is populated."""
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result['bs'] ,model_result['ss'] ):
                result = model_result['result'][batch_size][sequence_length]
                self.assertIsNotNone(result )
def _UpperCAmelCase ( self ) -> int:
'''simple docstring'''
lowercase_ : int = 'sshleifer/tiny-gpt2'
lowercase_ : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,eager_mode=__UpperCamelCase ,multi_process=__UpperCamelCase ,)
lowercase_ : Union[str, Any] = TensorFlowBenchmark(__UpperCamelCase )
lowercase_ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
lowercase_ : List[str] = 'sgugger/tiny-distilbert-classification'
lowercase_ : Dict = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,only_pretrain_model=__UpperCamelCase ,)
lowercase_ : int = TensorFlowBenchmark(__UpperCamelCase )
lowercase_ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ : Any = 'sshleifer/tiny-gpt2'
lowercase_ : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,)
lowercase_ : Optional[Any] = TensorFlowBenchmark(__UpperCamelCase )
lowercase_ : int = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ : Dict = 'sshleifer/tiny-gpt2'
lowercase_ : Tuple = AutoConfig.from_pretrained(__UpperCamelCase )
lowercase_ : str = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,eager_mode=__UpperCamelCase ,multi_process=__UpperCamelCase ,)
lowercase_ : str = TensorFlowBenchmark(__UpperCamelCase ,[config] )
lowercase_ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
lowercase_ : Any = 'sshleifer/tiny-gpt2'
lowercase_ : Any = AutoConfig.from_pretrained(__UpperCamelCase )
lowercase_ : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,)
lowercase_ : int = TensorFlowBenchmark(__UpperCamelCase ,[config] )
lowercase_ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : int = 'sshleifer/tiny-gpt2'
lowercase_ : List[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,)
lowercase_ : List[str] = TensorFlowBenchmark(__UpperCamelCase )
lowercase_ : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
lowercase_ : List[str] = 'sshleifer/tiny-gpt2'
lowercase_ : Optional[int] = AutoConfig.from_pretrained(__UpperCamelCase )
lowercase_ : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,)
lowercase_ : str = TensorFlowBenchmark(__UpperCamelCase ,[config] )
lowercase_ : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
lowercase_ : str = 'patrickvonplaten/t5-tiny-random'
lowercase_ : int = AutoConfig.from_pretrained(__UpperCamelCase )
lowercase_ : Optional[int] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,)
lowercase_ : List[str] = TensorFlowBenchmark(__UpperCamelCase ,configs=[config] )
lowercase_ : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('GPU' ) ) == 0 ,'Cannot do xla on CPU.' )
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ : Optional[int] = 'sshleifer/tiny-gpt2'
lowercase_ : Union[str, Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,use_xla=__UpperCamelCase ,multi_process=__UpperCamelCase ,)
lowercase_ : Union[str, Any] = TensorFlowBenchmark(__UpperCamelCase )
lowercase_ : int = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
lowercase_ : List[str] = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase_ : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,inference=__UpperCamelCase ,save_to_csv=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,inference_time_csv_file=os.path.join(__UpperCamelCase ,'inf_time.csv' ) ,inference_memory_csv_file=os.path.join(__UpperCamelCase ,'inf_mem.csv' ) ,env_info_csv_file=os.path.join(__UpperCamelCase ,'env.csv' ) ,multi_process=__UpperCamelCase ,)
lowercase_ : List[str] = TensorFlowBenchmark(__UpperCamelCase )
benchmark.run()
self.assertTrue(Path(os.path.join(__UpperCamelCase ,'inf_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(__UpperCamelCase ,'inf_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(__UpperCamelCase ,'env.csv' ) ).exists() )
def _UpperCAmelCase ( self ) -> int:
'''simple docstring'''
lowercase_ : int = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(__UpperCamelCase ):
self.assertTrue(hasattr(__UpperCamelCase ,'sequential' ) )
self.assertTrue(hasattr(__UpperCamelCase ,'cumulative' ) )
self.assertTrue(hasattr(__UpperCamelCase ,'current' ) )
self.assertTrue(hasattr(__UpperCamelCase ,'total' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase_ : Dict = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,log_filename=os.path.join(__UpperCamelCase ,'log.txt' ) ,log_print=__UpperCamelCase ,trace_memory_line_by_line=__UpperCamelCase ,eager_mode=__UpperCamelCase ,multi_process=__UpperCamelCase ,)
lowercase_ : Dict = TensorFlowBenchmark(__UpperCamelCase )
lowercase_ : Any = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(__UpperCamelCase ,'log.txt' ) ).exists() )
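if __name__ == "__main__":
    # Minimal sketch (added for illustration) of driving the benchmark outside
    # unittest. The boolean flags are assumptions: the tests above pass their
    # values through placeholders, so False/True here mirror typical usage only.
    args = TensorFlowBenchmarkArguments(
        models=['sshleifer/tiny-gpt2'] ,training=False ,inference=True ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=False ,)
    results = TensorFlowBenchmark(args ).run()
    print(results.time_inference_result )
    print(results.memory_inference_result )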
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"""configuration_glpn""": ["""GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GLPNConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_glpn"] = ["""GLPNFeatureExtractor"""]
    _import_structure["image_processing_glpn"] = ["""GLPNImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_glpn"] = [
"""GLPN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GLPNForDepthEstimation""",
"""GLPNLayer""",
"""GLPNModel""",
"""GLPNPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class UpperCamelCase ( SequenceFeatureExtractor ):
    model_input_names = ['input_values', 'padding_mask']
    def __init__( self ,feature_size = 1 ,sampling_rate = 24000 ,padding_value = 0.0 ,chunk_length_s = None ,overlap = None ,**kwargs ,):
        """Keep the chunking parameters and defer the rest to SequenceFeatureExtractor."""
        super().__init__(feature_size=feature_size ,sampling_rate=sampling_rate ,padding_value=padding_value ,**kwargs )
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
    @property
    def chunk_length( self ):
        """Number of audio samples in one chunk, or None when no chunking is configured."""
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate )
    @property
    def chunk_stride( self ):
        """Stride between consecutive chunks, derived from the configured overlap."""
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1 ,int((1.0 - self.overlap) * self.chunk_length ) )
    def __call__( self ,raw_audio ,padding = None ,truncation = False ,max_length = None ,return_tensors = None ,sampling_rate = None ,) -> BatchFeature:
        """Featurize raw audio into padded (and optionally chunked) model inputs."""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
                    f''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'''
                    f''' {self.sampling_rate} and not {sampling_rate}.''' )
        else:
            logger.warning(
                'It is strongly recommended to pass the `sampling_rate` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.' )
        if padding and truncation:
            raise ValueError('Both padding and truncation were set. Make sure you only set one.' )
        elif padding is None:
            # by default let's pad the inputs
            padding = True
        is_batched = bool(
            isinstance(raw_audio ,(list, tuple) ) and (isinstance(raw_audio[0] ,(np.ndarray, tuple, list) )) )
        if is_batched:
            raw_audio = [np.asarray(audio ,dtype=np.float32 ).T for audio in raw_audio]
        elif not is_batched and not isinstance(raw_audio ,np.ndarray ):
            raw_audio = np.asarray(raw_audio ,dtype=np.float32 )
        elif isinstance(raw_audio ,np.ndarray ) and raw_audio.dtype is np.dtype(np.float64 ):
            raw_audio = raw_audio.astype(np.float32 )
        # always return batch
        if not is_batched:
            raw_audio = [np.asarray(raw_audio ).T]
        # verify inputs are valid
        for idx, example in enumerate(raw_audio ):
            if example.ndim > 2:
                raise ValueError(f'''Expected input shape (channels, length) but got shape {example.shape}''' )
            if self.feature_size == 1 and example.ndim != 1:
                raise ValueError(f'''Expected mono audio but example has {example.shape[-1]} channels''' )
            if self.feature_size == 2 and example.shape[-1] != 2:
                raise ValueError(f'''Expected stereo audio but example has {example.shape[-1]} channels''' )
        padded_inputs = None
        input_values = BatchFeature({'input_values': raw_audio} )
        if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
            if truncation:
                max_length = min(array.shape[0] for array in raw_audio )
                nb_step = int(np.floor(max_length / self.chunk_stride ) )
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
            elif padding:
                max_length = max(array.shape[0] for array in raw_audio )
                nb_step = int(np.ceil(max_length / self.chunk_stride ) )
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
                padding = 'max_length'
            else:
                padded_inputs = input_values
        # normal padding on batch
        if padded_inputs is None:
            padded_inputs = self.pad(
                input_values ,max_length=max_length ,truncation=truncation ,padding=padding ,return_attention_mask=padding ,)
            if padding:
                padded_inputs['padding_mask'] = padded_inputs.pop('attention_mask' )
        input_values = []
        for example in padded_inputs.pop('input_values' ):
            if self.feature_size == 1:
                example = example[..., None]
            input_values.append(example.T )
        padded_inputs['input_values'] = input_values
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors )
        return padded_inputs
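if __name__ == "__main__":
    # Hedged usage sketch (added for illustration; not part of the original file).
    # The class name is kept exactly as it appears above; the printed shape is an
    # assumption based on the padding logic in `__call__`.
    demo_extractor = UpperCamelCase(chunk_length_s=1.0 ,overlap=0.01 )
    demo_audio = np.zeros(24000 ,dtype=np.float32 )  # one second of mono 24 kHz audio
    demo_features = demo_extractor(demo_audio ,sampling_rate=24000 ,return_tensors='np' )
    print(demo_features['input_values'].shape )  # e.g. (1, 1, padded_length)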
"""simple docstring"""
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
__SCREAMING_SNAKE_CASE ="\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n"
__SCREAMING_SNAKE_CASE ="\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n"
__SCREAMING_SNAKE_CASE ="\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n \"accuracy\": Accuracy\n \"f1\": F1 score\n \"pearson\": Pearson Correlation\n \"spearmanr\": Spearman Correlation\n \"matthews_correlation\": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of [\"mnli\", \"mnli_mismatched\", \"mnli_matched\", \"qnli\", \"rte\", \"wnli\", \"hans\"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({\"pearson\": round(results[\"pearson\"], 2), \"spearmanr\": round(results[\"spearmanr\"], 2)})\n {\'pearson\': 1.0, \'spearmanr\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'cola\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n"
def simple_accuracy( preds , labels ):
    return float((preds == labels).mean() )
def acc_and_fa( preds , labels ):
    acc = simple_accuracy(preds , labels )
    fa = float(f1_score(y_true=labels , y_pred=preds ) )
    return {
        "accuracy": acc,
        "f1": fa,
    }
def pearson_and_spearman( preds , labels ):
    pearson_corr = float(pearsonr(preds , labels )[0] )
    spearman_corr = float(spearmanr(preds , labels )[0] )
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase ( datasets.Metric ):
"""simple docstring"""
    def _info( self ):
'''simple docstring'''
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'You should supply a configuration name selected in '
'[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", '
'\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]' )
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
'references': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
} ) ,codebase_urls=[] ,reference_urls=[] ,format='numpy' ,)
    def _compute( self ,predictions ,references ):
        """Compute the metric for the configured GLUE task."""
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references ,predictions )}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions ,references )
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_fa(predictions ,references )
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions ,references )}
else:
raise KeyError(
'You should supply a configuration name selected in '
'[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", '
'\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]' )
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mra"] = [
"MRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"MraForMaskedLM",
"MraForMultipleChoice",
"MraForQuestionAnswering",
"MraForSequenceClassification",
"MraForTokenClassification",
"MraLayer",
"MraModel",
"MraPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
"tokenization_cpmant": ["CpmAntTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_cpmant"] = [
"CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
"CpmAntForCausalLM",
"CpmAntModel",
"CpmAntPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
__SCREAMING_SNAKE_CASE ="python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("importlib_metadata")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def dep_version_check(pkg: str, hint: str = None):
    require_version(deps[pkg], hint)
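if __name__ == "__main__":
    # Illustrative use of the helper above (added for demonstration): verify pinned
    # dependencies on demand, optionally with a custom install hint. Assumes both
    # package names appear in `deps`.
    dep_version_check("numpy")
    dep_version_check("tokenizers", "pip install tokenizers -U")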